net/mlx5: HWS, renamed the files in accordance with naming convention
author Yevgeny Kliteynik <kliteyn@nvidia.com>
Thu, 31 Oct 2024 12:58:54 +0000 (14:58 +0200)
committer Jakub Kicinski <kuba@kernel.org>
Sun, 3 Nov 2024 23:37:15 +0000 (15:37 -0800)
Removed the 'mlx5hws_' file name prefix from the internal HWS files.

Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20241031125856.530927-4-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
65 files changed:
drivers/net/ethernet/mellanox/mlx5/core/Makefile
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.h [new file with mode: 0644]

index 42411fe772abbcf85e37c6b3eed03564a24784c5..be3d0876c521d17ba15bbf292b637faf67154be6 100644 (file)
@@ -136,21 +136,21 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/sws/dr_domain.o \
 #
 # HW Steering
 #
-mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/mlx5hws_cmd.o \
-                                       steering/hws/mlx5hws_context.o \
-                                       steering/hws/mlx5hws_pat_arg.o \
-                                       steering/hws/mlx5hws_buddy.o \
-                                       steering/hws/mlx5hws_pool.o \
-                                       steering/hws/mlx5hws_table.o \
-                                       steering/hws/mlx5hws_action.o \
-                                       steering/hws/mlx5hws_rule.o \
-                                       steering/hws/mlx5hws_matcher.o \
-                                       steering/hws/mlx5hws_send.o \
-                                       steering/hws/mlx5hws_definer.o \
-                                       steering/hws/mlx5hws_bwc.o \
-                                       steering/hws/mlx5hws_debug.o \
-                                       steering/hws/mlx5hws_vport.o \
-                                       steering/hws/mlx5hws_bwc_complex.o
+mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/cmd.o \
+                                       steering/hws/context.o \
+                                       steering/hws/pat_arg.o \
+                                       steering/hws/buddy.o \
+                                       steering/hws/pool.o \
+                                       steering/hws/table.o \
+                                       steering/hws/action.o \
+                                       steering/hws/rule.o \
+                                       steering/hws/matcher.o \
+                                       steering/hws/send.o \
+                                       steering/hws/definer.o \
+                                       steering/hws/bwc.o \
+                                       steering/hws/debug.o \
+                                       steering/hws/vport.o \
+                                       steering/hws/bwc_complex.o
 
 
 #
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
new file mode 100644 (file)
index 0000000..a897cdc
--- /dev/null
@@ -0,0 +1,2604 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+#define MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET 1
+
+/* Header removal size limited to 128B (64 words) */
+#define MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE 128
+
+/* This is the longest supported action sequence for FDB table:
+ * DECAP, POP_VLAN, MODIFY, CTR, ASO, PUSH_VLAN, MODIFY, ENCAP, Term.
+ */
+static const u32 action_order_arr[MLX5HWS_TABLE_TYPE_MAX][MLX5HWS_ACTION_TYP_MAX] = {
+       [MLX5HWS_TABLE_TYPE_FDB] = {
+               BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) |
+               BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
+               BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
+               BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+               BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+               BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+               BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+               BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+               BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) |
+               BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
+               BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
+               BIT(MLX5HWS_ACTION_TYP_CTR),
+               BIT(MLX5HWS_ACTION_TYP_TAG),
+               BIT(MLX5HWS_ACTION_TYP_ASO_METER),
+               BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+               BIT(MLX5HWS_ACTION_TYP_TBL) |
+               BIT(MLX5HWS_ACTION_TYP_VPORT) |
+               BIT(MLX5HWS_ACTION_TYP_DROP) |
+               BIT(MLX5HWS_ACTION_TYP_SAMPLER) |
+               BIT(MLX5HWS_ACTION_TYP_RANGE) |
+               BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY),
+               BIT(MLX5HWS_ACTION_TYP_LAST),
+       },
+};
+
+static const char * const mlx5hws_action_type_str[] = {
+       [MLX5HWS_ACTION_TYP_LAST] = "LAST",
+       [MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2] = "TNL_L2_TO_L2",
+       [MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2] = "L2_TO_TNL_L2",
+       [MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2] = "TNL_L3_TO_L2",
+       [MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3] = "L2_TO_TNL_L3",
+       [MLX5HWS_ACTION_TYP_DROP] = "DROP",
+       [MLX5HWS_ACTION_TYP_TBL] = "TBL",
+       [MLX5HWS_ACTION_TYP_CTR] = "CTR",
+       [MLX5HWS_ACTION_TYP_TAG] = "TAG",
+       [MLX5HWS_ACTION_TYP_MODIFY_HDR] = "MODIFY_HDR",
+       [MLX5HWS_ACTION_TYP_VPORT] = "VPORT",
+       [MLX5HWS_ACTION_TYP_MISS] = "DEFAULT_MISS",
+       [MLX5HWS_ACTION_TYP_POP_VLAN] = "POP_VLAN",
+       [MLX5HWS_ACTION_TYP_PUSH_VLAN] = "PUSH_VLAN",
+       [MLX5HWS_ACTION_TYP_ASO_METER] = "ASO_METER",
+       [MLX5HWS_ACTION_TYP_DEST_ARRAY] = "DEST_ARRAY",
+       [MLX5HWS_ACTION_TYP_INSERT_HEADER] = "INSERT_HEADER",
+       [MLX5HWS_ACTION_TYP_REMOVE_HEADER] = "REMOVE_HEADER",
+       [MLX5HWS_ACTION_TYP_SAMPLER] = "SAMPLER",
+       [MLX5HWS_ACTION_TYP_RANGE] = "RANGE",
+};
+
+static_assert(ARRAY_SIZE(mlx5hws_action_type_str) == MLX5HWS_ACTION_TYP_MAX,
+             "Missing mlx5hws_action_type_str");
+
+const char *mlx5hws_action_type_to_str(enum mlx5hws_action_type action_type)
+{
+       return mlx5hws_action_type_str[action_type];
+}
+
+enum mlx5hws_action_type mlx5hws_action_get_type(struct mlx5hws_action *action)
+{
+       return action->type;
+}
+
+static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
+                                        enum mlx5hws_context_shared_stc_type stc_type,
+                                        u8 tbl_type)
+{
+       struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+       struct mlx5hws_action_shared_stc *shared_stc;
+       int ret;
+
+       mutex_lock(&ctx->ctrl_lock);
+       if (ctx->common_res[tbl_type].shared_stc[stc_type]) {
+               ctx->common_res[tbl_type].shared_stc[stc_type]->refcount++;
+               mutex_unlock(&ctx->ctrl_lock);
+               return 0;
+       }
+
+       shared_stc = kzalloc(sizeof(*shared_stc), GFP_KERNEL);
+       if (!shared_stc) {
+               ret = -ENOMEM;
+               goto unlock_and_out;
+       }
+       switch (stc_type) {
+       case MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3:
+               stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;
+               stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+               stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+               stc_attr.remove_header.decap = 0;
+               stc_attr.remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+               stc_attr.remove_header.end_anchor = MLX5_HEADER_ANCHOR_IPV6_IPV4;
+               break;
+       case MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP:
+               stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
+               stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+               stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+               stc_attr.remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START;
+               stc_attr.remove_words.num_of_words = MLX5HWS_ACTION_HDR_LEN_L2_VLAN;
+               break;
+       default:
+               mlx5hws_err(ctx, "No such stc_type: %d\n", stc_type);
+               pr_warn("HWS: Invalid stc_type: %d\n", stc_type);
+               ret = -EINVAL;
+               goto unlock_and_out;
+       }
+
+       ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+                                             &shared_stc->stc_chunk);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to allocate shared decap l2 STC\n");
+               goto free_shared_stc;
+       }
+
+       ctx->common_res[tbl_type].shared_stc[stc_type] = shared_stc;
+       ctx->common_res[tbl_type].shared_stc[stc_type]->refcount = 1;
+
+       mutex_unlock(&ctx->ctrl_lock);
+
+       return 0;
+
+free_shared_stc:
+       kfree(shared_stc);
+unlock_and_out:
+       mutex_unlock(&ctx->ctrl_lock);
+       return ret;
+}
+
+static int hws_action_get_shared_stc(struct mlx5hws_action *action,
+                                    enum mlx5hws_context_shared_stc_type stc_type)
+{
+       struct mlx5hws_context *ctx = action->ctx;
+       int ret;
+
+       if (stc_type >= MLX5HWS_CONTEXT_SHARED_STC_MAX) {
+               pr_warn("HWS: Invalid shared stc_type: %d\n", stc_type);
+               return -EINVAL;
+       }
+
+       if (unlikely(!(action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB))) {
+               pr_warn("HWS: Invalid action->flags: %d\n", action->flags);
+               return -EINVAL;
+       }
+
+       ret = hws_action_get_shared_stc_nic(ctx, stc_type, MLX5HWS_TABLE_TYPE_FDB);
+       if (ret) {
+               mlx5hws_err(ctx,
+                           "Failed to allocate memory for FDB shared STCs (type: %d)\n",
+                           stc_type);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void hws_action_put_shared_stc(struct mlx5hws_action *action,
+                                     enum mlx5hws_context_shared_stc_type stc_type)
+{
+       enum mlx5hws_table_type tbl_type = MLX5HWS_TABLE_TYPE_FDB;
+       struct mlx5hws_action_shared_stc *shared_stc;
+       struct mlx5hws_context *ctx = action->ctx;
+
+       if (stc_type >= MLX5HWS_CONTEXT_SHARED_STC_MAX) {
+               pr_warn("HWS: Invalid shared stc_type: %d\n", stc_type);
+               return;
+       }
+
+       mutex_lock(&ctx->ctrl_lock);
+       if (--ctx->common_res[tbl_type].shared_stc[stc_type]->refcount) {
+               mutex_unlock(&ctx->ctrl_lock);
+               return;
+       }
+
+       shared_stc = ctx->common_res[tbl_type].shared_stc[stc_type];
+
+       mlx5hws_action_free_single_stc(ctx, tbl_type, &shared_stc->stc_chunk);
+       kfree(shared_stc);
+       ctx->common_res[tbl_type].shared_stc[stc_type] = NULL;
+       mutex_unlock(&ctx->ctrl_lock);
+}
+
+static void hws_action_print_combo(struct mlx5hws_context *ctx,
+                                  enum mlx5hws_action_type *user_actions)
+{
+       mlx5hws_err(ctx, "Invalid action_type sequence");
+       while (*user_actions != MLX5HWS_ACTION_TYP_LAST) {
+               mlx5hws_err(ctx, " %s", mlx5hws_action_type_to_str(*user_actions));
+               user_actions++;
+       }
+       mlx5hws_err(ctx, "\n");
+}
+
+bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx,
+                               enum mlx5hws_action_type *user_actions,
+                               enum mlx5hws_table_type table_type)
+{
+       const u32 *order_arr = action_order_arr[table_type];
+       u8 order_idx = 0;
+       u8 user_idx = 0;
+       bool valid_combo;
+
+       if (table_type >= MLX5HWS_TABLE_TYPE_MAX) {
+               mlx5hws_err(ctx, "Invalid table_type %d", table_type);
+               return false;
+       }
+
+       while (order_arr[order_idx] != BIT(MLX5HWS_ACTION_TYP_LAST)) {
+       /* User action order validated, move to next user action */
+               if (BIT(user_actions[user_idx]) & order_arr[order_idx])
+                       user_idx++;
+
+               /* Iterate to the next supported action in the order */
+               order_idx++;
+       }
+
+       /* Combination is valid if all user actions were processed */
+       valid_combo = user_actions[user_idx] == MLX5HWS_ACTION_TYP_LAST;
+       if (!valid_combo)
+               hws_action_print_combo(ctx, user_actions);
+
+       return valid_combo;
+}
+
+static bool
+hws_action_fixup_stc_attr(struct mlx5hws_context *ctx,
+                         struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+                         struct mlx5hws_cmd_stc_modify_attr *fixup_stc_attr,
+                         enum mlx5hws_table_type table_type,
+                         bool is_mirror)
+{
+       bool use_fixup = false;
+       u32 fw_tbl_type;
+       u32 base_id;
+
+       fw_tbl_type = mlx5hws_table_get_res_fw_ft_type(table_type, is_mirror);
+
+       switch (stc_attr->action_type) {
+       case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
+               if (is_mirror && stc_attr->ste_table.ignore_tx) {
+                       fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+                       fixup_stc_attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+                       fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+                       use_fixup = true;
+                       break;
+               }
+               if (!is_mirror)
+                       base_id = mlx5hws_pool_chunk_get_base_id(stc_attr->ste_table.ste_pool,
+                                                                &stc_attr->ste_table.ste);
+               else
+                       base_id =
+                               mlx5hws_pool_chunk_get_base_mirror_id(stc_attr->ste_table.ste_pool,
+                                                                     &stc_attr->ste_table.ste);
+
+               *fixup_stc_attr = *stc_attr;
+               fixup_stc_attr->ste_table.ste_obj_id = base_id;
+               use_fixup = true;
+               break;
+
+       case MLX5_IFC_STC_ACTION_TYPE_TAG:
+               if (fw_tbl_type == FS_FT_FDB_TX) {
+                       fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_NOP;
+                       fixup_stc_attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+                       fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+                       use_fixup = true;
+               }
+               break;
+
+       case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
+               if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) {
+                       fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT;
+                       fixup_stc_attr->action_offset = stc_attr->action_offset;
+                       fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+                       fixup_stc_attr->vport.esw_owner_vhca_id = ctx->caps->vhca_id;
+                       fixup_stc_attr->vport.vport_num = ctx->caps->eswitch_manager_vport_number;
+                       fixup_stc_attr->vport.eswitch_owner_vhca_id_valid =
+                               ctx->caps->merged_eswitch;
+                       use_fixup = true;
+               }
+               break;
+
+       case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
+               if (stc_attr->vport.vport_num != MLX5_VPORT_UPLINK)
+                       break;
+
+               if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) {
+                       /* The FW doesn't allow going to the wire in TX/RX via JUMP_TO_VPORT */
+                       fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK;
+                       fixup_stc_attr->action_offset = stc_attr->action_offset;
+                       fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+                       fixup_stc_attr->vport.vport_num = 0;
+                       fixup_stc_attr->vport.esw_owner_vhca_id = stc_attr->vport.esw_owner_vhca_id;
+                       fixup_stc_attr->vport.eswitch_owner_vhca_id_valid =
+                               stc_attr->vport.eswitch_owner_vhca_id_valid;
+               }
+               use_fixup = true;
+               break;
+
+       default:
+               break;
+       }
+
+       return use_fixup;
+}
+
+int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx,
+                                   struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+                                   u32 table_type,
+                                   struct mlx5hws_pool_chunk *stc)
+__must_hold(&ctx->ctrl_lock)
+{
+       struct mlx5hws_cmd_stc_modify_attr cleanup_stc_attr = {0};
+       struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
+       struct mlx5hws_cmd_stc_modify_attr fixup_stc_attr = {0};
+       bool use_fixup;
+       u32 obj_0_id;
+       int ret;
+
+       ret = mlx5hws_pool_chunk_alloc(stc_pool, stc);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to allocate single action STC\n");
+               return ret;
+       }
+
+       stc_attr->stc_offset = stc->offset;
+
+       /* Dynamic reparse not supported, overwrite and use default */
+       if (!mlx5hws_context_cap_dynamic_reparse(ctx))
+               stc_attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+
+       obj_0_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
+
+       /* According to table/action limitation change the stc_attr */
+       use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr, &fixup_stc_attr, table_type, false);
+       ret = mlx5hws_cmd_stc_modify(ctx->mdev, obj_0_id,
+                                    use_fixup ? &fixup_stc_attr : stc_attr);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to modify STC action_type %d tbl_type %d\n",
+                           stc_attr->action_type, table_type);
+               goto free_chunk;
+       }
+
+       /* Modify the FDB peer */
+       if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
+               u32 obj_1_id;
+
+               obj_1_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
+
+               use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr,
+                                                     &fixup_stc_attr,
+                                                     table_type, true);
+               ret = mlx5hws_cmd_stc_modify(ctx->mdev, obj_1_id,
+                                            use_fixup ? &fixup_stc_attr : stc_attr);
+               if (ret) {
+                       mlx5hws_err(ctx,
+                                   "Failed to modify peer STC action_type %d tbl_type %d\n",
+                                   stc_attr->action_type, table_type);
+                       goto clean_obj_0;
+               }
+       }
+
+       return 0;
+
+clean_obj_0:
+       cleanup_stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+       cleanup_stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+       cleanup_stc_attr.stc_offset = stc->offset;
+       mlx5hws_cmd_stc_modify(ctx->mdev, obj_0_id, &cleanup_stc_attr);
+free_chunk:
+       mlx5hws_pool_chunk_free(stc_pool, stc);
+       return ret;
+}
+
+void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx,
+                                   u32 table_type,
+                                   struct mlx5hws_pool_chunk *stc)
+__must_hold(&ctx->ctrl_lock)
+{
+       struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
+       struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+       u32 obj_id;
+
+       /* Modify the STC not to point to an object */
+       stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+       stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+       stc_attr.stc_offset = stc->offset;
+       obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
+       mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
+
+       if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
+               obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
+               mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
+       }
+
+       mlx5hws_pool_chunk_free(stc_pool, stc);
+}
+
+static u32 hws_action_get_mh_stc_type(struct mlx5hws_context *ctx,
+                                     __be64 pattern)
+{
+       u8 action_type = MLX5_GET(set_action_in, &pattern, action_type);
+
+       switch (action_type) {
+       case MLX5_MODIFICATION_TYPE_SET:
+               return MLX5_IFC_STC_ACTION_TYPE_SET;
+       case MLX5_MODIFICATION_TYPE_ADD:
+               return MLX5_IFC_STC_ACTION_TYPE_ADD;
+       case MLX5_MODIFICATION_TYPE_COPY:
+               return MLX5_IFC_STC_ACTION_TYPE_COPY;
+       case MLX5_MODIFICATION_TYPE_ADD_FIELD:
+               return MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD;
+       default:
+               mlx5hws_err(ctx, "Unsupported action type: 0x%x\n", action_type);
+               return MLX5_IFC_STC_ACTION_TYPE_NOP;
+       }
+}
+
+static void hws_action_fill_stc_attr(struct mlx5hws_action *action,
+                                    u32 obj_id,
+                                    struct mlx5hws_cmd_stc_modify_attr *attr)
+{
+       attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+
+       switch (action->type) {
+       case MLX5HWS_ACTION_TYP_TAG:
+               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_TAG;
+               attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+               break;
+       case MLX5HWS_ACTION_TYP_DROP:
+               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+               attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+               break;
+       case MLX5HWS_ACTION_TYP_MISS:
+               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW;
+               attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+               break;
+       case MLX5HWS_ACTION_TYP_CTR:
+               attr->id = obj_id;
+               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_COUNTER;
+               attr->action_offset = MLX5HWS_ACTION_OFFSET_DW0;
+               break;
+       case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+       case MLX5HWS_ACTION_TYP_MODIFY_HDR:
+               attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+               attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+               if (action->modify_header.require_reparse)
+                       attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+
+               if (action->modify_header.num_of_actions == 1) {
+                       attr->modify_action.data = action->modify_header.single_action;
+                       attr->action_type = hws_action_get_mh_stc_type(action->ctx,
+                                                                      attr->modify_action.data);
+
+                       if (attr->action_type == MLX5_IFC_STC_ACTION_TYPE_ADD ||
+                           attr->action_type == MLX5_IFC_STC_ACTION_TYPE_SET)
+                               MLX5_SET(set_action_in, &attr->modify_action.data, data, 0);
+               } else {
+                       attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST;
+                       attr->modify_header.arg_id = action->modify_header.arg_id;
+                       attr->modify_header.pattern_id = action->modify_header.pat_id;
+               }
+               break;
+       case MLX5HWS_ACTION_TYP_TBL:
+       case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
+               attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+               attr->dest_table_id = obj_id;
+               break;
+       case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;
+               attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+               attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+               attr->remove_header.decap = 1;
+               attr->remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+               attr->remove_header.end_anchor = MLX5_HEADER_ANCHOR_INNER_MAC;
+               break;
+       case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+       case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+       case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+               attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+               if (!action->reformat.require_reparse)
+                       attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+
+               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT;
+               attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+               attr->insert_header.encap = action->reformat.encap;
+               attr->insert_header.insert_anchor = action->reformat.anchor;
+               attr->insert_header.arg_id = action->reformat.arg_id;
+               attr->insert_header.header_size = action->reformat.header_size;
+               attr->insert_header.insert_offset = action->reformat.offset;
+               break;
+       case MLX5HWS_ACTION_TYP_ASO_METER:
+               attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ASO;
+               attr->aso.aso_type = ASO_OPC_MOD_POLICER;
+               attr->aso.devx_obj_id = obj_id;
+               attr->aso.return_reg_id = action->aso.return_reg_id;
+               break;
+       case MLX5HWS_ACTION_TYP_VPORT:
+               attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT;
+               attr->vport.vport_num = action->vport.vport_num;
+               attr->vport.esw_owner_vhca_id = action->vport.esw_owner_vhca_id;
+               attr->vport.eswitch_owner_vhca_id_valid = action->vport.esw_owner_vhca_id_valid;
+               break;
+       case MLX5HWS_ACTION_TYP_POP_VLAN:
+               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
+               attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+               attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+               attr->remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START;
+               attr->remove_words.num_of_words = MLX5HWS_ACTION_HDR_LEN_L2_VLAN / 2;
+               break;
+       case MLX5HWS_ACTION_TYP_PUSH_VLAN:
+               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT;
+               attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+               attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+               attr->insert_header.encap = 0;
+               attr->insert_header.is_inline = 1;
+               attr->insert_header.insert_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+               attr->insert_header.insert_offset = MLX5HWS_ACTION_HDR_LEN_L2_MACS;
+               attr->insert_header.header_size = MLX5HWS_ACTION_HDR_LEN_L2_VLAN;
+               break;
+       case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
+               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
+               attr->remove_header.decap = 0; /* the mode we support decap is 0 */
+               attr->remove_words.start_anchor = action->remove_header.anchor;
+               /* the size is in already in words */
+               attr->remove_words.num_of_words = action->remove_header.size;
+               attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+               attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+               break;
+       default:
+               mlx5hws_err(action->ctx, "Invalid action type %d\n", action->type);
+       }
+}
+
+/* Allocate the STC (steering context) backing @action.
+ * @obj_id: device object id the STC attributes may reference; its meaning
+ *          depends on the action type (filled by hws_action_fill_stc_attr()).
+ * Only the FDB table type is allocated here, and only when the action was
+ * created with MLX5HWS_ACTION_FLAG_HWS_FDB.
+ * Returns 0 on success or the error from mlx5hws_action_alloc_single_stc().
+ */
+static int
+hws_action_create_stcs(struct mlx5hws_action *action, u32 obj_id)
+{
+       struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+       struct mlx5hws_context *ctx = action->ctx;
+       int ret;
+
+       hws_action_fill_stc_attr(action, obj_id, &stc_attr);
+
+       /* Block unsupported parallel obj modify over the same base */
+       mutex_lock(&ctx->ctrl_lock);
+
+       /* Allocate STC for FDB */
+       if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB) {
+               ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr,
+                                                     MLX5HWS_TABLE_TYPE_FDB,
+                                                     &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+               if (ret)
+                       goto out_err;
+       }
+
+       mutex_unlock(&ctx->ctrl_lock);
+
+       return 0;
+
+out_err:
+       mutex_unlock(&ctx->ctrl_lock);
+       return ret;
+}
+
+/* Release the STC allocated by hws_action_create_stcs().
+ * Mirrors the create path: only the FDB STC is freed, under ctrl_lock.
+ */
+static void
+hws_action_destroy_stcs(struct mlx5hws_action *action)
+{
+       struct mlx5hws_context *ctx = action->ctx;
+
+       /* Block unsupported parallel obj modify over the same base */
+       mutex_lock(&ctx->ctrl_lock);
+
+       if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB)
+               mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB,
+                                              &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+
+       mutex_unlock(&ctx->ctrl_lock);
+}
+
+/* True iff @flags contains the HWS-FDB flag. */
+static bool hws_action_is_flag_hws_fdb(u32 flags)
+{
+       return (flags & MLX5HWS_ACTION_FLAG_HWS_FDB) != 0;
+}
+
+/* Validate that an HWS action may be created in @ctx with @flags:
+ * HWS must be supported by the context, and FDB actions require
+ * eswitch-manager capability. Logs and returns false on violation.
+ */
+static bool
+hws_action_validate_hws_action(struct mlx5hws_context *ctx, u32 flags)
+{
+       bool hws_supported = ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT;
+       bool wants_fdb = flags & MLX5HWS_ACTION_FLAG_HWS_FDB;
+
+       if (!hws_supported) {
+               mlx5hws_err(ctx, "Cannot create HWS action since HWS is not supported\n");
+               return false;
+       }
+
+       if (wants_fdb && !ctx->caps->eswitch_manager) {
+               mlx5hws_err(ctx, "Cannot create HWS action for FDB for non-eswitch-manager\n");
+               return false;
+       }
+
+       return true;
+}
+
+/* Allocate and minimally initialize a zeroed array of @bulk_sz actions of
+ * @action_type. Flags are checked up front: only HWS FDB actions are
+ * supported here. Returns the array or NULL on failure.
+ */
+static struct mlx5hws_action *
+hws_action_create_generic_bulk(struct mlx5hws_context *ctx,
+                              u32 flags,
+                              enum mlx5hws_action_type action_type,
+                              u8 bulk_sz)
+{
+       struct mlx5hws_action *bulk;
+       int idx;
+
+       if (!hws_action_is_flag_hws_fdb(flags)) {
+               mlx5hws_err(ctx,
+                           "Action (type: %d) flags must specify only HWS FDB\n", action_type);
+               return NULL;
+       }
+
+       if (!hws_action_validate_hws_action(ctx, flags))
+               return NULL;
+
+       bulk = kcalloc(bulk_sz, sizeof(*bulk), GFP_KERNEL);
+       if (!bulk)
+               return NULL;
+
+       for (idx = 0; idx < bulk_sz; idx++) {
+               struct mlx5hws_action *cur = &bulk[idx];
+
+               cur->ctx = ctx;
+               cur->flags = flags;
+               cur->type = action_type;
+       }
+
+       return bulk;
+}
+
+/* Convenience wrapper: allocate a single generic action (bulk of one). */
+static struct mlx5hws_action *
+hws_action_create_generic(struct mlx5hws_context *ctx,
+                         u32 flags,
+                         enum mlx5hws_action_type action_type)
+{
+       const u8 single_action = 1;
+
+       return hws_action_create_generic_bulk(ctx, flags, action_type,
+                                             single_action);
+}
+
+/* Create a destination action that forwards to the flow table identified
+ * by @table_id. Returns the action or NULL on failure.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table_num(struct mlx5hws_context *ctx,
+                                    u32 table_id,
+                                    u32 flags)
+{
+       struct mlx5hws_action *action;
+
+       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_TBL);
+       if (!action)
+               return NULL;
+
+       /* STC points directly at the destination table object. */
+       if (hws_action_create_stcs(action, table_id)) {
+               kfree(action);
+               return NULL;
+       }
+
+       action->dest_obj.obj_id = table_id;
+
+       return action;
+}
+
+/* Create a destination-table action from a table handle; thin wrapper
+ * around mlx5hws_action_create_dest_table_num() using the table's ft_id.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table(struct mlx5hws_context *ctx,
+                                struct mlx5hws_table *tbl,
+                                u32 flags)
+{
+       u32 ft_id = tbl->ft_id;
+
+       return mlx5hws_action_create_dest_table_num(ctx, ft_id, flags);
+}
+
+/* Create a drop action. Returns the action or NULL on failure. */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_drop(struct mlx5hws_context *ctx, u32 flags)
+{
+       struct mlx5hws_action *drop;
+
+       drop = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_DROP);
+       if (!drop)
+               return NULL;
+
+       /* Drop needs no device object; STC attrs come from the type alone. */
+       if (hws_action_create_stcs(drop, 0)) {
+               kfree(drop);
+               return NULL;
+       }
+
+       return drop;
+}
+
+/* Create a default-miss action (go to the table's default path).
+ * Returns the action or NULL on failure.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_default_miss(struct mlx5hws_context *ctx, u32 flags)
+{
+       struct mlx5hws_action *miss;
+
+       miss = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_MISS);
+       if (!miss)
+               return NULL;
+
+       /* No device object id is associated with a miss action. */
+       if (hws_action_create_stcs(miss, 0)) {
+               kfree(miss);
+               return NULL;
+       }
+
+       return miss;
+}
+
+/* Create a flow-tag action. Returns the action or NULL on failure. */
+struct mlx5hws_action *
+mlx5hws_action_create_tag(struct mlx5hws_context *ctx, u32 flags)
+{
+       struct mlx5hws_action *tag_action =
+               hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_TAG);
+
+       if (!tag_action)
+               return NULL;
+
+       if (hws_action_create_stcs(tag_action, 0)) {
+               kfree(tag_action);
+               return NULL;
+       }
+
+       return tag_action;
+}
+
+/* Common constructor for ASO-based actions (e.g. meter).
+ * @obj_id: ASO device object id, also used for the STC.
+ * @return_reg_id: register that receives the ASO result.
+ * Returns the action or NULL on failure.
+ */
+static struct mlx5hws_action *
+hws_action_create_aso(struct mlx5hws_context *ctx,
+                     enum mlx5hws_action_type action_type,
+                     u32 obj_id,
+                     u8 return_reg_id,
+                     u32 flags)
+{
+       struct mlx5hws_action *aso_action;
+
+       aso_action = hws_action_create_generic(ctx, flags, action_type);
+       if (!aso_action)
+               return NULL;
+
+       aso_action->aso.obj_id = obj_id;
+       aso_action->aso.return_reg_id = return_reg_id;
+
+       if (hws_action_create_stcs(aso_action, obj_id)) {
+               kfree(aso_action);
+               return NULL;
+       }
+
+       return aso_action;
+}
+
+/* Create an ASO meter (policer) action; see hws_action_create_aso(). */
+struct mlx5hws_action *
+mlx5hws_action_create_aso_meter(struct mlx5hws_context *ctx,
+                               u32 obj_id,
+                               u8 return_reg_id,
+                               u32 flags)
+{
+       enum mlx5hws_action_type type = MLX5HWS_ACTION_TYP_ASO_METER;
+
+       return hws_action_create_aso(ctx, type, obj_id, return_reg_id, flags);
+}
+
+/* Create a flow-counter action bound to counter object @obj_id.
+ * Returns the action or NULL on failure.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_counter(struct mlx5hws_context *ctx,
+                             u32 obj_id,
+                             u32 flags)
+{
+       struct mlx5hws_action *ctr;
+
+       ctr = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_CTR);
+       if (!ctr)
+               return NULL;
+
+       if (hws_action_create_stcs(ctr, obj_id)) {
+               kfree(ctr);
+               return NULL;
+       }
+
+       return ctr;
+}
+
+/* Create a forward-to-vport action (FDB only).
+ * @vport_num: destination vport number.
+ * @vhca_id_valid / @vhca_id: owner eswitch vhca id, when crossing eswitches.
+ * Returns the action or NULL on failure.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_vport(struct mlx5hws_context *ctx,
+                                u16 vport_num,
+                                bool vhca_id_valid,
+                                u16 vhca_id,
+                                u32 flags)
+{
+       struct mlx5hws_action *action;
+       int ret;
+
+       if (!(flags & MLX5HWS_ACTION_FLAG_HWS_FDB)) {
+               mlx5hws_err(ctx, "Vport action is supported for FDB only\n");
+               return NULL;
+       }
+
+       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_VPORT);
+       if (!action)
+               return NULL;
+
+       /* NOTE(review): this capability check runs after the allocation above;
+        * failure is handled via free_action, so no leak — just ordering.
+        */
+       if (!ctx->caps->merged_eswitch && vhca_id_valid && vhca_id != ctx->caps->vhca_id) {
+               mlx5hws_err(ctx, "Non merged eswitch cannot send to other vhca\n");
+               goto free_action;
+       }
+
+       action->vport.vport_num = vport_num;
+       action->vport.esw_owner_vhca_id_valid = vhca_id_valid;
+
+       if (vhca_id_valid)
+               action->vport.esw_owner_vhca_id = vhca_id;
+
+       ret = hws_action_create_stcs(action, 0);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed creating stc for vport %d\n", vport_num);
+               goto free_action;
+       }
+
+       return action;
+
+free_action:
+       kfree(action);
+       return NULL;
+}
+
+/* Create a push-VLAN action (inline header insert after the L2 MACs).
+ * Returns the action or NULL on failure.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags)
+{
+       struct mlx5hws_action *action;
+       int ret;
+
+       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_PUSH_VLAN);
+       if (!action)
+               return NULL;
+
+       ret = hws_action_create_stcs(action, 0);
+       if (!ret)
+               return action;
+
+       mlx5hws_err(ctx, "Failed creating stc for push vlan\n");
+       kfree(action);
+       return NULL;
+}
+
+/* Create a pop-VLAN action. Acquires the shared double-pop STC first,
+ * then the action's own STC; the unwind path releases them in reverse
+ * order. Returns the action or NULL on failure.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_pop_vlan(struct mlx5hws_context *ctx, u32 flags)
+{
+       struct mlx5hws_action *action;
+       int ret;
+
+       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_POP_VLAN);
+       if (!action)
+               return NULL;
+
+       /* Shared STC refcounted get; must be paired with put on failure. */
+       ret = hws_action_get_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to create remove stc for reformat\n");
+               goto free_action;
+       }
+
+       ret = hws_action_create_stcs(action, 0);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed creating stc for pop vlan\n");
+               goto free_shared;
+       }
+
+       return action;
+
+free_shared:
+       hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
+free_action:
+       kfree(action);
+       return NULL;
+}
+
+/* Set up @num_of_hdrs insert-header actions sharing one argument object.
+ * Each header size must be a multiple of W_SIZE. A single arg is created
+ * sized for the largest header; every action in the bulk references it.
+ * On failure, STCs created so far are destroyed and the arg is freed.
+ * Returns 0 on success or a negative errno.
+ */
+static int
+hws_action_handle_insert_with_ptr(struct mlx5hws_action *action,
+                                 u8 num_of_hdrs,
+                                 struct mlx5hws_action_reformat_header *hdrs,
+                                 u32 log_bulk_sz)
+{
+       size_t max_sz = 0;
+       u32 arg_id;
+       int ret, i;
+
+       for (i = 0; i < num_of_hdrs; i++) {
+               if (hdrs[i].sz % W_SIZE != 0) {
+                       mlx5hws_err(action->ctx,
+                                   "Header data size should be in WORD granularity\n");
+                       return -EINVAL;
+               }
+               max_sz = max(hdrs[i].sz, max_sz);
+       }
+
+       /* Allocate single shared arg object for all headers */
+       ret = mlx5hws_arg_create(action->ctx,
+                                hdrs->data,
+                                max_sz,
+                                log_bulk_sz,
+                                action->flags & MLX5HWS_ACTION_FLAG_SHARED,
+                                &arg_id);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < num_of_hdrs; i++) {
+               action[i].reformat.arg_id = arg_id;
+               action[i].reformat.header_size = hdrs[i].sz;
+               action[i].reformat.num_of_hdrs = num_of_hdrs;
+               action[i].reformat.max_hdr_sz = max_sz;
+               action[i].reformat.require_reparse = true;
+
+               /* Encap variants insert at packet start with offset 0. */
+               if (action[i].type == MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2 ||
+                   action[i].type == MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3) {
+                       action[i].reformat.anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+                       action[i].reformat.offset = 0;
+                       action[i].reformat.encap = 1;
+               }
+
+               ret = hws_action_create_stcs(&action[i], 0);
+               if (ret) {
+                       mlx5hws_err(action->ctx, "Failed to create stc for reformat\n");
+                       goto free_stc;
+               }
+       }
+
+       return 0;
+
+free_stc:
+       /* Unwind only the actions whose STC creation succeeded. */
+       while (i--)
+               hws_action_destroy_stcs(&action[i]);
+
+       mlx5hws_arg_destroy(action->ctx, arg_id);
+       return ret;
+}
+
+/* Set up L2-to-tunnel-L3 encap: remove the L2 header via a shared STC,
+ * then insert the new L3 header reusing the insert-with-pointer path.
+ * Returns 0 on success; on failure the shared STC reference is released.
+ */
+static int
+hws_action_handle_l2_to_tunnel_l3(struct mlx5hws_action *action,
+                                 u8 num_of_hdrs,
+                                 struct mlx5hws_action_reformat_header *hdrs,
+                                 u32 log_bulk_sz)
+{
+       int ret;
+
+       /* The action is remove-l2-header + insert-l3-header */
+       ret = hws_action_get_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
+       if (ret) {
+               mlx5hws_err(action->ctx, "Failed to create remove stc for reformat\n");
+               return ret;
+       }
+
+       /* Reuse the insert with pointer for the L2L3 header */
+       ret = hws_action_handle_insert_with_ptr(action,
+                                               num_of_hdrs,
+                                               hdrs,
+                                               log_bulk_sz);
+       if (ret)
+               goto put_shared_stc;
+
+       return 0;
+
+put_shared_stc:
+       hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
+       return ret;
+}
+
+/* Build the modify-header action list implementing decap-L3:
+ * one remove of the outer L2/L3 headers, a series of 2-byte inline
+ * inserts for the new L2 header of @data_sz bytes, and a final removal
+ * of the 2 padding bytes. Writes the actions into @mh_data (each action
+ * occupies MLX5HWS_ACTION_DOUBLE_SIZE bytes) and reports the count in
+ * @num_of_actions. Caller must size @mh_data accordingly.
+ */
+static void hws_action_prepare_decap_l3_actions(size_t data_sz,
+                                               u8 *mh_data,
+                                               int *num_of_actions)
+{
+       int actions;
+       u32 i;
+
+       /* Remove L2L3 outer headers */
+       MLX5_SET(stc_ste_param_remove, mh_data, action_type,
+                MLX5_MODIFICATION_TYPE_REMOVE);
+       MLX5_SET(stc_ste_param_remove, mh_data, decap, 0x1);
+       MLX5_SET(stc_ste_param_remove, mh_data, remove_start_anchor,
+                MLX5_HEADER_ANCHOR_PACKET_START);
+       MLX5_SET(stc_ste_param_remove, mh_data, remove_end_anchor,
+                MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4);
+       mh_data += MLX5HWS_ACTION_DOUBLE_SIZE; /* Assume every action is 2 dw */
+       actions = 1;
+
+       /* Add the new header using inline action 4Byte at a time, the header
+        * is added in reversed order to the beginning of the packet to avoid
+        * incorrect parsing by the HW. Since header is 14B or 18B an extra
+        * two bytes are padded and later removed.
+        */
+       for (i = 0; i < data_sz / MLX5HWS_ACTION_INLINE_DATA_SIZE + 1; i++) {
+               MLX5_SET(stc_ste_param_insert, mh_data, action_type,
+                        MLX5_MODIFICATION_TYPE_INSERT);
+               MLX5_SET(stc_ste_param_insert, mh_data, inline_data, 0x1);
+               MLX5_SET(stc_ste_param_insert, mh_data, insert_anchor,
+                        MLX5_HEADER_ANCHOR_PACKET_START);
+               MLX5_SET(stc_ste_param_insert, mh_data, insert_size, 2);
+               mh_data += MLX5HWS_ACTION_DOUBLE_SIZE;
+               actions++;
+       }
+
+       /* Remove first 2 extra bytes */
+       MLX5_SET(stc_ste_param_remove_words, mh_data, action_type,
+                MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
+       MLX5_SET(stc_ste_param_remove_words, mh_data, remove_start_anchor,
+                MLX5_HEADER_ANCHOR_PACKET_START);
+       /* The hardware expects here size in words (2 bytes) */
+       MLX5_SET(stc_ste_param_remove_words, mh_data, remove_size, 1);
+       actions++;
+
+       *num_of_actions = actions;
+}
+
+/* Set up tunnel-L3-to-L2 decap as a modify-header sequence.
+ * Header sizes must be L2 (with or without VLAN). One shared arg is
+ * allocated for all headers; each header gets its own pattern and STC.
+ * On failure the patterns/STCs created so far and the arg are released.
+ * Returns 0 on success or a negative errno.
+ */
+static int
+hws_action_handle_tunnel_l3_to_l2(struct mlx5hws_action *action,
+                                 u8 num_of_hdrs,
+                                 struct mlx5hws_action_reformat_header *hdrs,
+                                 u32 log_bulk_sz)
+{
+       u8 mh_data[MLX5HWS_ACTION_REFORMAT_DATA_SIZE] = {0};
+       struct mlx5hws_context *ctx = action->ctx;
+       u32 arg_id, pat_id;
+       int num_of_actions;
+       int mh_data_size;
+       int ret, i;
+
+       for (i = 0; i < num_of_hdrs; i++) {
+               if (hdrs[i].sz != MLX5HWS_ACTION_HDR_LEN_L2 &&
+                   hdrs[i].sz != MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN) {
+                       mlx5hws_err(ctx, "Data size is not supported for decap-l3\n");
+                       return -EINVAL;
+               }
+       }
+
+       /* Create a full modify header action list in case shared */
+       hws_action_prepare_decap_l3_actions(hdrs->sz, mh_data, &num_of_actions);
+       if (action->flags & MLX5HWS_ACTION_FLAG_SHARED)
+               mlx5hws_action_prepare_decap_l3_data(hdrs->data, mh_data, num_of_actions);
+
+       /* All DecapL3 cases require the same max arg size */
+       ret = mlx5hws_arg_create_modify_header_arg(ctx,
+                                                  (__be64 *)mh_data,
+                                                  num_of_actions,
+                                                  log_bulk_sz,
+                                                  action->flags & MLX5HWS_ACTION_FLAG_SHARED,
+                                                  &arg_id);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < num_of_hdrs; i++) {
+               /* Rebuild the action list per header (sizes may differ). */
+               memset(mh_data, 0, MLX5HWS_ACTION_REFORMAT_DATA_SIZE);
+               hws_action_prepare_decap_l3_actions(hdrs[i].sz, mh_data, &num_of_actions);
+               mh_data_size = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE;
+
+               ret = mlx5hws_pat_get_pattern(ctx, (__be64 *)mh_data, mh_data_size, &pat_id);
+               if (ret) {
+                       mlx5hws_err(ctx, "Failed to allocate pattern for DecapL3\n");
+                       goto free_stc_and_pat;
+               }
+
+               action[i].modify_header.max_num_of_actions = num_of_actions;
+               action[i].modify_header.num_of_actions = num_of_actions;
+               action[i].modify_header.num_of_patterns = num_of_hdrs;
+               action[i].modify_header.arg_id = arg_id;
+               action[i].modify_header.pat_id = pat_id;
+               action[i].modify_header.require_reparse =
+                       mlx5hws_pat_require_reparse((__be64 *)mh_data, num_of_actions);
+
+               ret = hws_action_create_stcs(&action[i], 0);
+               if (ret) {
+                       /* Current pattern has no STC yet; put it here,
+                        * earlier indices are unwound below.
+                        */
+                       mlx5hws_pat_put_pattern(ctx, pat_id);
+                       goto free_stc_and_pat;
+               }
+       }
+
+       return 0;
+
+free_stc_and_pat:
+       while (i--) {
+               hws_action_destroy_stcs(&action[i]);
+               mlx5hws_pat_put_pattern(ctx, action[i].modify_header.pat_id);
+       }
+
+       mlx5hws_arg_destroy(action->ctx, arg_id);
+       return ret;
+}
+
+/* Dispatch reformat setup by action type. Each helper allocates the
+ * required STCs (and args/patterns where needed) for the whole bulk.
+ * Returns 0 on success, -EINVAL for an unknown type, or the helper's error.
+ */
+static int
+hws_action_create_reformat_hws(struct mlx5hws_action *action,
+                              u8 num_of_hdrs,
+                              struct mlx5hws_action_reformat_header *hdrs,
+                              u32 bulk_size)
+{
+       switch (action->type) {
+       case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+               /* Plain L2 decap carries no header data, only an STC. */
+               return hws_action_create_stcs(action, 0);
+       case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+               return hws_action_handle_insert_with_ptr(action, num_of_hdrs,
+                                                        hdrs, bulk_size);
+       case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+               return hws_action_handle_l2_to_tunnel_l3(action, num_of_hdrs,
+                                                        hdrs, bulk_size);
+       case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+               return hws_action_handle_tunnel_l3_to_l2(action, num_of_hdrs,
+                                                        hdrs, bulk_size);
+       default:
+               mlx5hws_err(action->ctx, "Invalid HWS reformat action type\n");
+               return -EINVAL;
+       }
+}
+
+/* Create a bulk of @num_of_hdrs reformat actions of @reformat_type.
+ * Shared actions cannot be bulked or combined with a bulk size.
+ * Returns the action array or NULL on failure.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_reformat(struct mlx5hws_context *ctx,
+                              enum mlx5hws_action_type reformat_type,
+                              u8 num_of_hdrs,
+                              struct mlx5hws_action_reformat_header *hdrs,
+                              u32 log_bulk_size,
+                              u32 flags)
+{
+       bool shared = flags & MLX5HWS_ACTION_FLAG_SHARED;
+       struct mlx5hws_action *action;
+
+       if (!num_of_hdrs) {
+               mlx5hws_err(ctx, "Reformat num_of_hdrs cannot be zero\n");
+               return NULL;
+       }
+
+       action = hws_action_create_generic_bulk(ctx, flags, reformat_type, num_of_hdrs);
+       if (!action)
+               return NULL;
+
+       if (shared && (log_bulk_size || num_of_hdrs > 1)) {
+               mlx5hws_err(ctx, "Reformat flags don't fit HWS (flags: 0x%x)\n", flags);
+               goto free_action;
+       }
+
+       if (hws_action_create_reformat_hws(action, num_of_hdrs, hdrs, log_bulk_size)) {
+               mlx5hws_err(ctx, "Failed to create HWS reformat action\n");
+               goto free_action;
+       }
+
+       return action;
+
+free_action:
+       kfree(action);
+       return NULL;
+}
+
+/* Set up @num_of_patterns modify-header actions.
+ * NOPE padding is computed per pattern into @new_pattern; one shared arg
+ * (when more than one action is needed) covers the largest pattern.
+ * Single-action patterns are inlined (no pattern object, pat_id stays 0).
+ * On failure, STCs/patterns created so far and the arg are released.
+ * Returns 0 on success or a negative errno.
+ */
+static int
+hws_action_create_modify_header_hws(struct mlx5hws_action *action,
+                                   u8 num_of_patterns,
+                                   struct mlx5hws_action_mh_pattern *pattern,
+                                   u32 log_bulk_size)
+{
+       struct mlx5hws_context *ctx = action->ctx;
+       u16 num_actions, max_mh_actions = 0;
+       int i, ret, size_in_bytes;
+       u32 pat_id, arg_id = 0;
+       __be64 *new_pattern;
+       size_t pat_max_sz;
+
+       pat_max_sz = MLX5HWS_ARG_CHUNK_SIZE_MAX * MLX5HWS_ARG_DATA_SIZE;
+       size_in_bytes = pat_max_sz * sizeof(__be64);
+       new_pattern = kcalloc(num_of_patterns, size_in_bytes, GFP_KERNEL);
+       if (!new_pattern)
+               return -ENOMEM;
+
+       /* Calculate maximum number of mh actions for shared arg allocation */
+       for (i = 0; i < num_of_patterns; i++) {
+               size_t new_num_actions;
+               size_t cur_num_actions;
+               u32 nope_location;
+
+               cur_num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE;
+
+               /* May grow the action count by inserting NOPE actions. */
+               mlx5hws_pat_calc_nope(pattern[i].data, cur_num_actions,
+                                     pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE,
+                                     &new_num_actions, &nope_location,
+                                     &new_pattern[i * pat_max_sz]);
+
+               action[i].modify_header.nope_locations = nope_location;
+               action[i].modify_header.num_of_actions = new_num_actions;
+
+               max_mh_actions = max(max_mh_actions, new_num_actions);
+       }
+
+       if (mlx5hws_arg_get_arg_log_size(max_mh_actions) >= MLX5HWS_ARG_CHUNK_SIZE_MAX) {
+               mlx5hws_err(ctx, "Num of actions (%d) bigger than allowed\n",
+                           max_mh_actions);
+               ret = -EINVAL;
+               goto free_new_pat;
+       }
+
+       /* Allocate single shared arg for all patterns based on the max size */
+       if (max_mh_actions > 1) {
+               ret = mlx5hws_arg_create_modify_header_arg(ctx,
+                                                          pattern->data,
+                                                          max_mh_actions,
+                                                          log_bulk_size,
+                                                          action->flags &
+                                                          MLX5HWS_ACTION_FLAG_SHARED,
+                                                          &arg_id);
+               if (ret)
+                       goto free_new_pat;
+       }
+
+       for (i = 0; i < num_of_patterns; i++) {
+               if (!mlx5hws_pat_verify_actions(ctx, pattern[i].data, pattern[i].sz)) {
+                       mlx5hws_err(ctx, "Fail to verify pattern modify actions\n");
+                       ret = -EINVAL;
+                       goto free_stc_and_pat;
+               }
+               num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE;
+               action[i].modify_header.num_of_patterns = num_of_patterns;
+               action[i].modify_header.max_num_of_actions = max_mh_actions;
+
+               action[i].modify_header.require_reparse =
+                       mlx5hws_pat_require_reparse(pattern[i].data, num_actions);
+
+               if (num_actions == 1) {
+                       pat_id = 0;
+                       /* Optimize single modify action to be used inline */
+                       action[i].modify_header.single_action = pattern[i].data[0];
+                       action[i].modify_header.single_action_type =
+                               MLX5_GET(set_action_in, pattern[i].data, action_type);
+               } else {
+                       /* Multiple modify actions require a pattern */
+                       if (unlikely(action[i].modify_header.nope_locations)) {
+                               size_t pattern_sz;
+
+                               /* Use the NOPE-padded copy built above. */
+                               pattern_sz = action[i].modify_header.num_of_actions *
+                                            MLX5HWS_MODIFY_ACTION_SIZE;
+                               ret =
+                               mlx5hws_pat_get_pattern(ctx,
+                                                       &new_pattern[i * pat_max_sz],
+                                                       pattern_sz, &pat_id);
+                       } else {
+                               ret = mlx5hws_pat_get_pattern(ctx,
+                                                             pattern[i].data,
+                                                             pattern[i].sz,
+                                                             &pat_id);
+                       }
+                       if (ret) {
+                               mlx5hws_err(ctx,
+                                           "Failed to allocate pattern for modify header\n");
+                               goto free_stc_and_pat;
+                       }
+
+                       action[i].modify_header.arg_id = arg_id;
+                       action[i].modify_header.pat_id = pat_id;
+               }
+               /* Allocate STC for each action representing a header */
+               ret = hws_action_create_stcs(&action[i], 0);
+               if (ret) {
+                       /* Current index has no STC; its pattern (if any)
+                        * must be put here, earlier indices unwind below.
+                        */
+                       if (pat_id)
+                               mlx5hws_pat_put_pattern(ctx, pat_id);
+                       goto free_stc_and_pat;
+               }
+       }
+
+       kfree(new_pattern);
+       return 0;
+
+free_stc_and_pat:
+       while (i--) {
+               hws_action_destroy_stcs(&action[i]);
+               if (action[i].modify_header.pat_id)
+                       mlx5hws_pat_put_pattern(ctx, action[i].modify_header.pat_id);
+       }
+
+       if (arg_id)
+               mlx5hws_arg_destroy(ctx, arg_id);
+free_new_pat:
+       kfree(new_pattern);
+       return ret;
+}
+
+/* Create a bulk of @num_of_patterns modify-header actions.
+ * Shared actions cannot be bulked or combined with a bulk size.
+ * Returns the action array or NULL on failure.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_modify_header(struct mlx5hws_context *ctx,
+                                   u8 num_of_patterns,
+                                   struct mlx5hws_action_mh_pattern *patterns,
+                                   u32 log_bulk_size,
+                                   u32 flags)
+{
+       bool shared = flags & MLX5HWS_ACTION_FLAG_SHARED;
+       struct mlx5hws_action *action;
+       int err;
+
+       if (!num_of_patterns) {
+               mlx5hws_err(ctx, "Invalid number of patterns\n");
+               return NULL;
+       }
+
+       action = hws_action_create_generic_bulk(ctx, flags,
+                                               MLX5HWS_ACTION_TYP_MODIFY_HDR,
+                                               num_of_patterns);
+       if (!action)
+               return NULL;
+
+       if (shared && (log_bulk_size || num_of_patterns > 1)) {
+               mlx5hws_err(ctx, "Action cannot be shared with requested pattern or size\n");
+               goto free_action;
+       }
+
+       err = hws_action_create_modify_header_hws(action,
+                                                 num_of_patterns,
+                                                 patterns,
+                                                 log_bulk_size);
+       if (err)
+               goto free_action;
+
+       return action;
+
+free_action:
+       kfree(action);
+       return NULL;
+}
+
+/* Create a multi-destination action backed by an FW "island" table:
+ * a forward table holding one FTE that fans out to @dests.
+ * Only table and vport destinations are supported; reformat per-dest is
+ * rejected. Requires flags == (HWS_FDB | SHARED). @dest_list ownership
+ * transfers to the action on success.
+ * Returns the action or NULL on failure.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
+                                size_t num_dest,
+                                struct mlx5hws_action_dest_attr *dests,
+                                bool ignore_flow_level,
+                                u32 flow_source,
+                                u32 flags)
+{
+       struct mlx5hws_cmd_set_fte_dest *dest_list = NULL;
+       struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+       struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
+       struct mlx5hws_cmd_forward_tbl *fw_island;
+       struct mlx5hws_action *action;
+       u32 i /*, packet_reformat_id*/;
+       int ret;
+
+       if (num_dest <= 1) {
+               mlx5hws_err(ctx, "Action must have multiple dests\n");
+               return NULL;
+       }
+
+       if (flags == (MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED)) {
+               ft_attr.type = FS_FT_FDB;
+               ft_attr.level = ctx->caps->fdb_ft.max_level - 1;
+       } else {
+               mlx5hws_err(ctx, "Action flags not supported\n");
+               return NULL;
+       }
+
+       dest_list = kcalloc(num_dest, sizeof(*dest_list), GFP_KERNEL);
+       if (!dest_list)
+               return NULL;
+
+       /* Translate each HWS destination into a FW FTE destination. */
+       for (i = 0; i < num_dest; i++) {
+               enum mlx5hws_action_type action_type = dests[i].dest->type;
+               struct mlx5hws_action *reformat_action = dests[i].reformat;
+
+               switch (action_type) {
+               case MLX5HWS_ACTION_TYP_TBL:
+                       dest_list[i].destination_type =
+                               MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+                       dest_list[i].destination_id = dests[i].dest->dest_obj.obj_id;
+                       fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+                       fte_attr.ignore_flow_level = ignore_flow_level;
+                       /* ToDo: In SW steering we have a handling of 'go to WIRE'
+                        * destination here by upper layer setting 'is_wire_ft' flag
+                        * if the destination is wire.
+                        * This is because uplink should be last dest in the list.
+                        */
+                       break;
+               case MLX5HWS_ACTION_TYP_VPORT:
+                       dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+                       dest_list[i].destination_id = dests[i].dest->vport.vport_num;
+                       fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+                       if (ctx->caps->merged_eswitch) {
+                               dest_list[i].ext_flags |=
+                                       MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID;
+                               dest_list[i].esw_owner_vhca_id =
+                                       dests[i].dest->vport.esw_owner_vhca_id;
+                       }
+                       break;
+               default:
+                       mlx5hws_err(ctx, "Unsupported action in dest_array\n");
+                       goto free_dest_list;
+               }
+
+               if (reformat_action) {
+                       mlx5hws_err(ctx, "dest_array with reformat action - unsupported\n");
+                       goto free_dest_list;
+               }
+       }
+
+       fte_attr.dests_num = num_dest;
+       fte_attr.dests = dest_list;
+
+       fw_island = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
+       if (!fw_island)
+               goto free_dest_list;
+
+       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_DEST_ARRAY);
+       if (!action)
+               goto destroy_fw_island;
+
+       /* The action's STC points at the island table. */
+       ret = hws_action_create_stcs(action, fw_island->ft_id);
+       if (ret)
+               goto free_action;
+
+       action->dest_array.fw_island = fw_island;
+       action->dest_array.num_dest = num_dest;
+       action->dest_array.dest_list = dest_list;
+
+       return action;
+
+free_action:
+       kfree(action);
+destroy_fw_island:
+       mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, fw_island);
+free_dest_list:
+       /* Entries past a mid-loop failure are zeroed by kcalloc, so the
+        * full sweep over num_dest is safe.
+        */
+       for (i = 0; i < num_dest; i++) {
+               if (dest_list[i].ext_reformat_id)
+                       mlx5hws_cmd_packet_reformat_destroy(ctx->mdev,
+                                                           dest_list[i].ext_reformat_id);
+       }
+       kfree(dest_list);
+       return NULL;
+}
+
+/* Create an insert-header reformat action (one action entry per header).
+ * Validates that every header offset is WORD aligned, then programs the
+ * headers through the generic insert-with-pointer path.
+ * Returns the action array on success, NULL on failure.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_insert_header(struct mlx5hws_context *ctx,
+                                   u8 num_of_hdrs,
+                                   struct mlx5hws_action_insert_header *hdrs,
+                                   u32 log_bulk_size,
+                                   u32 flags)
+{
+       struct mlx5hws_action_reformat_header *reformat_hdrs;
+       struct mlx5hws_action *action;
+       int ret;
+       int i;
+
+       /* An empty header list would create a zero-sized reformat argument;
+        * reject it up front instead of failing deeper in the FW flow.
+        */
+       if (!num_of_hdrs) {
+               mlx5hws_err(ctx, "Reformat num_of_hdrs cannot be zero\n");
+               return NULL;
+       }
+
+       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_INSERT_HEADER);
+       if (!action)
+               return NULL;
+
+       reformat_hdrs = kcalloc(num_of_hdrs, sizeof(*reformat_hdrs), GFP_KERNEL);
+       if (!reformat_hdrs)
+               goto free_action;
+
+       for (i = 0; i < num_of_hdrs; i++) {
+               if (hdrs[i].offset % W_SIZE != 0) {
+                       mlx5hws_err(ctx, "Header offset should be in WORD granularity\n");
+                       goto free_reformat_hdrs;
+               }
+
+               action[i].reformat.anchor = hdrs[i].anchor;
+               action[i].reformat.encap = hdrs[i].encap;
+               action[i].reformat.offset = hdrs[i].offset;
+
+               reformat_hdrs[i].sz = hdrs[i].hdr.sz;
+               reformat_hdrs[i].data = hdrs[i].hdr.data;
+       }
+
+       ret = hws_action_handle_insert_with_ptr(action, num_of_hdrs,
+                                               reformat_hdrs, log_bulk_size);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to create HWS reformat action\n");
+               goto free_reformat_hdrs;
+       }
+
+       kfree(reformat_hdrs);
+
+       return action;
+
+free_reformat_hdrs:
+       kfree(reformat_hdrs);
+free_action:
+       kfree(action);
+       return NULL;
+}
+
+/* Create a remove-header action that strips @attr->size bytes at the given
+ * anchor. HW removes headers in WORD (W_SIZE) granularity, so the size is
+ * validated and stored in words. Returns NULL on any failure.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_remove_header(struct mlx5hws_context *ctx,
+                                   struct mlx5hws_action_remove_header_attr *attr,
+                                   u32 flags)
+{
+       struct mlx5hws_action *action;
+
+       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_REMOVE_HEADER);
+       if (!action)
+               return NULL;
+
+       /* support only remove anchor with size */
+       if (attr->size % W_SIZE != 0) {
+               mlx5hws_err(ctx,
+                           "Invalid size, HW supports header remove in WORD granularity\n");
+               goto free_action;
+       }
+
+       if (attr->size > MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE) {
+               mlx5hws_err(ctx, "Header removal size limited to %u bytes\n",
+                           MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE);
+               goto free_action;
+       }
+
+       action->remove_header.anchor = attr->anchor;
+       /* HW consumes the removal size in words, not bytes */
+       action->remove_header.size = attr->size / W_SIZE;
+
+       if (hws_action_create_stcs(action, 0))
+               goto free_action;
+
+       return action;
+
+free_action:
+       kfree(action);
+       return NULL;
+}
+
+/* Allocate a definer matching the outer packet-length field (DW0),
+ * used by the dest-match-range action. Takes ctrl_lock around the
+ * FW object allocation. Returns NULL on failure.
+ */
+static struct mlx5hws_definer *
+hws_action_create_dest_match_range_definer(struct mlx5hws_context *ctx)
+{
+       struct mlx5hws_definer *definer;
+       __be32 *tag;
+       int ret;
+
+       definer = kzalloc(sizeof(*definer), GFP_KERNEL);
+       if (!definer)
+               return NULL;
+
+       /* Select the DW holding the outer packet length (byte offset -> DW index) */
+       definer->dw_selector[0] = MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN / 4;
+       /* Set DW0 tag mask */
+       tag = (__force __be32 *)definer->mask.jumbo;
+       tag[MLX5HWS_RULE_JUMBO_MATCH_TAG_OFFSET_DW0] = htonl(0xffffUL << 16);
+
+       mutex_lock(&ctx->ctrl_lock);
+
+       ret = mlx5hws_definer_get_obj(ctx, definer);
+       if (ret < 0) {
+               mutex_unlock(&ctx->ctrl_lock);
+               kfree(definer);
+               return NULL;
+       }
+
+       mutex_unlock(&ctx->ctrl_lock);
+       /* On success the non-negative return value is the FW object id */
+       definer->obj_id = ret;
+
+       return definer;
+}
+
+/* Create the single-entry STE table used by the dest-match-range action:
+ * allocates an STE pool and a pair of RTCs (primary + mirror) whose miss
+ * path points at @miss_ft_id. Requires FW support for the range STE
+ * format in generated WQEs. Everything from the pool allocation onward
+ * runs under ctrl_lock. Returns NULL on failure.
+ */
+static struct mlx5hws_matcher_action_ste *
+hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
+                                        struct mlx5hws_definer *definer,
+                                        u32 miss_ft_id)
+{
+       struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
+       struct mlx5hws_action_default_stc *default_stc;
+       struct mlx5hws_matcher_action_ste *table_ste;
+       struct mlx5hws_pool_attr pool_attr = {0};
+       struct mlx5hws_pool *ste_pool, *stc_pool;
+       struct mlx5hws_pool_chunk *ste;
+       u32 *rtc_0_id, *rtc_1_id;
+       u32 obj_id;
+       int ret;
+
+       /* Check if STE range is supported */
+       if (!IS_BIT_SET(ctx->caps->supp_ste_format_gen_wqe, MLX5_IFC_RTC_STE_FORMAT_RANGE)) {
+               mlx5hws_err(ctx, "Range STE format not supported\n");
+               return NULL;
+       }
+
+       table_ste = kzalloc(sizeof(*table_ste), GFP_KERNEL);
+       if (!table_ste)
+               return NULL;
+
+       mutex_lock(&ctx->ctrl_lock);
+
+       pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
+       pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+       pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
+       pool_attr.alloc_log_sz = 1;
+       table_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
+       if (!table_ste->pool) {
+               mlx5hws_err(ctx, "Failed to allocate memory ste pool\n");
+               goto free_ste;
+       }
+
+       /* Allocate RTC */
+       rtc_0_id = &table_ste->rtc_0_id;
+       rtc_1_id = &table_ste->rtc_1_id;
+       ste_pool = table_ste->pool;
+       ste = &table_ste->ste;
+       ste->order = 1;
+
+       rtc_attr.log_size = 0;
+       rtc_attr.log_depth = 0;
+       rtc_attr.miss_ft_id = miss_ft_id;
+       rtc_attr.num_hash_definer = 1;
+       rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
+       rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
+       rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
+       rtc_attr.fw_gen_wqe = true;
+       rtc_attr.is_scnd_range = true;
+
+       obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+
+       rtc_attr.pd = ctx->pd_num;
+       rtc_attr.ste_base = obj_id;
+       rtc_attr.ste_offset = ste->offset;
+       rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
+       rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, false);
+
+       /* STC is a single resource (obj_id), use any STC for the ID */
+       stc_pool = ctx->stc_pool[MLX5HWS_TABLE_TYPE_FDB];
+       default_stc = ctx->common_res[MLX5HWS_TABLE_TYPE_FDB].default_stc;
+       obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
+       rtc_attr.stc_base = obj_id;
+
+       ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to create RTC");
+               goto pool_destroy;
+       }
+
+       /* Create mirror RTC */
+       obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+       rtc_attr.ste_base = obj_id;
+       rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, true);
+
+       obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
+       rtc_attr.stc_base = obj_id;
+
+       ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to create mirror RTC");
+               goto destroy_rtc_0;
+       }
+
+       mutex_unlock(&ctx->ctrl_lock);
+
+       return table_ste;
+
+destroy_rtc_0:
+       mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id);
+pool_destroy:
+       mlx5hws_pool_destroy(table_ste->pool);
+free_ste:
+       mutex_unlock(&ctx->ctrl_lock);
+       kfree(table_ste);
+       return NULL;
+}
+
+/* Tear down the range-match STE table: destroy both RTCs (mirror first),
+ * the STE pool and the table struct, all under ctrl_lock — reverse order
+ * of hws_action_create_dest_match_range_table().
+ */
+static void
+hws_action_destroy_dest_match_range_table(struct mlx5hws_context *ctx,
+                                         struct mlx5hws_matcher_action_ste *table_ste)
+{
+       mutex_lock(&ctx->ctrl_lock);
+
+       mlx5hws_cmd_rtc_destroy(ctx->mdev, table_ste->rtc_1_id);
+       mlx5hws_cmd_rtc_destroy(ctx->mdev, table_ste->rtc_0_id);
+       mlx5hws_pool_destroy(table_ste->pool);
+       kfree(table_ste);
+
+       mutex_unlock(&ctx->ctrl_lock);
+}
+
+/* Write a single always-hit STE carrying the [min, max] packet-length
+ * range into the range-match table, then drain the control queue to make
+ * sure the WQEs completed. Runs under ctrl_lock.
+ * Returns 0 on success or a negative errno.
+ */
+static int
+hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
+                                             struct mlx5hws_matcher_action_ste *table_ste,
+                                             struct mlx5hws_action *hit_ft_action,
+                                             struct mlx5hws_definer *range_definer,
+                                             u32 min, u32 max)
+{
+       struct mlx5hws_wqe_gta_data_seg_ste match_wqe_data = {0};
+       struct mlx5hws_wqe_gta_data_seg_ste range_wqe_data = {0};
+       struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0};
+       u32 no_use, used_rtc_0_id, used_rtc_1_id;
+       struct mlx5hws_context_common_res *common_res;
+       struct mlx5hws_send_ste_attr ste_attr = {0};
+       struct mlx5hws_send_engine *queue;
+       __be32 *wqe_data_arr;
+       int ret;
+
+       mutex_lock(&ctx->ctrl_lock);
+
+       /* Get the control queue */
+       queue = &ctx->send_queue[ctx->queues - 1];
+       if (unlikely(mlx5hws_send_engine_err(queue))) {
+               ret = -EIO;
+               goto error;
+       }
+
+       /* Init default send STE attributes */
+       ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+       ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+       ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+       ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+       ste_attr.send_attr.user_data = &no_use;
+       ste_attr.send_attr.rule = NULL;
+       ste_attr.send_attr.fence = 1;
+       ste_attr.send_attr.notify_hw = true;
+       ste_attr.rtc_0 = table_ste->rtc_0_id;
+       ste_attr.rtc_1 = table_ste->rtc_1_id;
+       ste_attr.used_id_rtc_0 = &used_rtc_0_id;
+       ste_attr.used_id_rtc_1 = &used_rtc_1_id;
+
+       common_res = &ctx->common_res[MLX5HWS_TABLE_TYPE_FDB];
+
+       /* init an empty match STE which will always hit */
+       ste_attr.wqe_ctrl = &wqe_ctrl;
+       ste_attr.wqe_data = &match_wqe_data;
+       ste_attr.send_attr.match_definer_id = ctx->caps->trivial_match_definer;
+
+       /* Fill WQE control data */
+       wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] =
+               htonl(common_res->default_stc->nop_ctr.offset);
+       wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+               htonl(common_res->default_stc->nop_dw5.offset);
+       wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] =
+               htonl(common_res->default_stc->nop_dw6.offset);
+       wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] =
+               htonl(common_res->default_stc->nop_dw7.offset);
+       wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
+               htonl(MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 << 29);
+       wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
+               htonl(hit_ft_action->stc[MLX5HWS_TABLE_TYPE_FDB].offset);
+
+       wqe_data_arr = (__force __be32 *)&range_wqe_data;
+
+       ste_attr.range_wqe_data = &range_wqe_data;
+       ste_attr.send_attr.len += MLX5HWS_WQE_SZ_GTA_DATA;
+       ste_attr.send_attr.range_definer_id = mlx5hws_definer_get_id(range_definer);
+
+       /* Fill range matching fields,
+        * min/max_value_2 corresponds to match_dw_0 in its definer,
+        * min_value_2 sets in DW0 in the STE and max_value_2 sets in DW1 in the STE.
+        */
+       wqe_data_arr[MLX5HWS_MATCHER_OFFSET_TAG_DW0] = htonl(min << 16);
+       wqe_data_arr[MLX5HWS_MATCHER_OFFSET_TAG_DW1] = htonl(max << 16);
+
+       /* Send WQEs to FW */
+       mlx5hws_send_stes_fw(ctx, queue, &ste_attr);
+
+       /* Poll for completion */
+       ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1,
+                                       MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to drain control queue");
+               goto error;
+       }
+
+       mutex_unlock(&ctx->ctrl_lock);
+
+       return 0;
+
+error:
+       mutex_unlock(&ctx->ctrl_lock);
+       return ret;
+}
+
+/* Create a destination action that forwards to @hit_ft when the packet
+ * length field is within [min, max], otherwise to @miss_ft. Builds a
+ * dedicated definer, a one-entry range STE table pre-filled through the
+ * control queue, and an STC jump to that table. Only the packet-length
+ * field is supported and min/max are limited to 16 bits.
+ * Returns NULL on failure.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
+                                      u32 field,
+                                      struct mlx5_flow_table *hit_ft,
+                                      struct mlx5_flow_table *miss_ft,
+                                      u32 min, u32 max, u32 flags)
+{
+       struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+       struct mlx5hws_matcher_action_ste *table_ste;
+       struct mlx5hws_action *hit_ft_action;
+       struct mlx5hws_definer *definer;
+       struct mlx5hws_action *action;
+       u32 miss_ft_id = miss_ft->id;
+       u32 hit_ft_id = hit_ft->id;
+       int ret;
+
+       if (field != MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN ||
+           min > 0xffff || max > 0xffff) {
+               mlx5hws_err(ctx, "Invalid match range parameters\n");
+               return NULL;
+       }
+
+       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_RANGE);
+       if (!action)
+               return NULL;
+
+       definer = hws_action_create_dest_match_range_definer(ctx);
+       if (!definer)
+               goto free_action;
+
+       table_ste = hws_action_create_dest_match_range_table(ctx, definer, miss_ft_id);
+       if (!table_ste)
+               goto destroy_definer;
+
+       hit_ft_action = mlx5hws_action_create_dest_table_num(ctx, hit_ft_id, flags);
+       if (!hit_ft_action)
+               goto destroy_table_ste;
+
+       ret = hws_action_create_dest_match_range_fill_table(ctx, table_ste,
+                                                           hit_ft_action,
+                                                           definer, min, max);
+       if (ret)
+               goto destroy_hit_ft_action;
+
+       action->range.table_ste = table_ste;
+       action->range.definer = definer;
+       action->range.hit_ft_action = hit_ft_action;
+
+       /* Allocate STC for jumps to STE */
+       mutex_lock(&ctx->ctrl_lock);
+       stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+       stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
+       stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+       stc_attr.ste_table.ste = table_ste->ste;
+       stc_attr.ste_table.ste_pool = table_ste->pool;
+       stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
+
+       ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, MLX5HWS_TABLE_TYPE_FDB,
+                                             &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+       if (ret)
+               goto error_unlock;
+
+       mutex_unlock(&ctx->ctrl_lock);
+
+       return action;
+
+error_unlock:
+       mutex_unlock(&ctx->ctrl_lock);
+destroy_hit_ft_action:
+       mlx5hws_action_destroy(hit_ft_action);
+destroy_table_ste:
+       hws_action_destroy_dest_match_range_table(ctx, table_ste);
+destroy_definer:
+       mlx5hws_definer_free(ctx, definer);
+free_action:
+       kfree(action);
+       mlx5hws_err(ctx, "Failed to create action dest match range");
+       return NULL;
+}
+
+/* Create a LAST action — a plain generic action handle with no STC or
+ * FW resources of its own (see the LAST case in hws_action_destroy_hws).
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_last(struct mlx5hws_context *ctx, u32 flags)
+{
+       return hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_LAST);
+}
+
+/* Flow sampler actions are not implemented in HWS yet; this stub only
+ * logs the unsupported request and returns NULL.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx,
+                                  u32 sampler_id, u32 flags)
+{
+       mlx5hws_err(ctx, "Flow sampler action - unsupported\n");
+       return NULL;
+}
+
+/* Release all HW/FW resources owned by @action according to its type,
+ * mirroring the matching create flow. For bulk action types (modify
+ * header, reformat, insert header) the per-entry STCs of every action in
+ * the array are freed. The action memory itself is freed by the caller
+ * (mlx5hws_action_destroy).
+ */
+static void hws_action_destroy_hws(struct mlx5hws_action *action)
+{
+       u32 ext_reformat_id;
+       bool shared_arg;
+       u32 obj_id;
+       u32 i;
+
+       switch (action->type) {
+       case MLX5HWS_ACTION_TYP_MISS:
+       case MLX5HWS_ACTION_TYP_TAG:
+       case MLX5HWS_ACTION_TYP_DROP:
+       case MLX5HWS_ACTION_TYP_CTR:
+       case MLX5HWS_ACTION_TYP_TBL:
+       case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+       case MLX5HWS_ACTION_TYP_ASO_METER:
+       case MLX5HWS_ACTION_TYP_PUSH_VLAN:
+       case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
+       case MLX5HWS_ACTION_TYP_VPORT:
+               hws_action_destroy_stcs(action);
+               break;
+       case MLX5HWS_ACTION_TYP_POP_VLAN:
+               hws_action_destroy_stcs(action);
+               hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
+               break;
+       case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+               hws_action_destroy_stcs(action);
+               mlx5hws_cmd_forward_tbl_destroy(action->ctx->mdev, action->dest_array.fw_island);
+               for (i = 0; i < action->dest_array.num_dest; i++) {
+                       ext_reformat_id = action->dest_array.dest_list[i].ext_reformat_id;
+                       if (ext_reformat_id)
+                               mlx5hws_cmd_packet_reformat_destroy(action->ctx->mdev,
+                                                                   ext_reformat_id);
+               }
+               kfree(action->dest_array.dest_list);
+               break;
+       case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+       case MLX5HWS_ACTION_TYP_MODIFY_HDR:
+               shared_arg = false;
+               for (i = 0; i < action->modify_header.num_of_patterns; i++) {
+                       hws_action_destroy_stcs(&action[i]);
+                       if (action[i].modify_header.num_of_actions > 1) {
+                               mlx5hws_pat_put_pattern(action[i].ctx,
+                                                       action[i].modify_header.pat_id);
+                               /* Save shared arg object to be freed after */
+                               obj_id = action[i].modify_header.arg_id;
+                               shared_arg = true;
+                       }
+               }
+               /* obj_id is only valid when shared_arg was set above */
+               if (shared_arg)
+                       mlx5hws_arg_destroy(action->ctx, obj_id);
+               break;
+       case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+               hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
+               for (i = 0; i < action->reformat.num_of_hdrs; i++)
+                       hws_action_destroy_stcs(&action[i]);
+               mlx5hws_arg_destroy(action->ctx, action->reformat.arg_id);
+               break;
+       case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+       case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+               for (i = 0; i < action->reformat.num_of_hdrs; i++)
+                       hws_action_destroy_stcs(&action[i]);
+               mlx5hws_arg_destroy(action->ctx, action->reformat.arg_id);
+               break;
+       case MLX5HWS_ACTION_TYP_RANGE:
+               hws_action_destroy_stcs(action);
+               hws_action_destroy_dest_match_range_table(action->ctx, action->range.table_ste);
+               mlx5hws_definer_free(action->ctx, action->range.definer);
+               mlx5hws_action_destroy(action->range.hit_ft_action);
+               break;
+       case MLX5HWS_ACTION_TYP_LAST:
+               /* LAST has no resources to release */
+               break;
+       default:
+               pr_warn("HWS: Invalid action type: %d\n", action->type);
+       }
+}
+
+/* Destroy an action: release its type-specific HW resources, then free
+ * the action handle itself. Always returns 0.
+ */
+int mlx5hws_action_destroy(struct mlx5hws_action *action)
+{
+       hws_action_destroy_hws(action);
+
+       kfree(action);
+       return 0;
+}
+
+/* Get (or take another reference on) the per-table-type default STCs:
+ * four NOP STCs (counter slot + DW5/DW6/DW7) and a default-hit (allow)
+ * STC. Allocated lazily on first use, refcounted afterwards. Caller
+ * must hold ctx->ctrl_lock. Returns 0 or a negative errno.
+ */
+int mlx5hws_action_get_default_stc(struct mlx5hws_context *ctx, u8 tbl_type)
+__must_hold(&ctx->ctrl_lock)
+{
+       struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+       struct mlx5hws_action_default_stc *default_stc;
+       int ret;
+
+       /* Already allocated for this table type - just take a reference */
+       if (ctx->common_res[tbl_type].default_stc) {
+               ctx->common_res[tbl_type].default_stc->refcount++;
+               return 0;
+       }
+
+       default_stc = kzalloc(sizeof(*default_stc), GFP_KERNEL);
+       if (!default_stc)
+               return -ENOMEM;
+
+       stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_NOP;
+       stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW0;
+       stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+       ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+                                             &default_stc->nop_ctr);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to allocate default counter STC\n");
+               goto free_default_stc;
+       }
+
+       stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+       ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+                                             &default_stc->nop_dw5);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to allocate default NOP DW5 STC\n");
+               goto free_nop_ctr;
+       }
+
+       stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+       ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+                                             &default_stc->nop_dw6);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to allocate default NOP DW6 STC\n");
+               goto free_nop_dw5;
+       }
+
+       stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW7;
+       ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+                                             &default_stc->nop_dw7);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to allocate default NOP DW7 STC\n");
+               goto free_nop_dw6;
+       }
+
+       stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+       stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW;
+
+       ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+                                             &default_stc->default_hit);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to allocate default allow STC\n");
+               goto free_nop_dw7;
+       }
+
+       ctx->common_res[tbl_type].default_stc = default_stc;
+       ctx->common_res[tbl_type].default_stc->refcount++;
+
+       return 0;
+
+free_nop_dw7:
+       mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7);
+free_nop_dw6:
+       mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);
+free_nop_dw5:
+       mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
+free_nop_ctr:
+       mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
+free_default_stc:
+       kfree(default_stc);
+       return ret;
+}
+
+/* Drop a reference to the per-table-type default STCs and free them all
+ * (reverse order of mlx5hws_action_get_default_stc) when the last user
+ * is gone. Caller must hold ctx->ctrl_lock.
+ */
+void mlx5hws_action_put_default_stc(struct mlx5hws_context *ctx, u8 tbl_type)
+__must_hold(&ctx->ctrl_lock)
+{
+       struct mlx5hws_action_default_stc *default_stc;
+
+       default_stc = ctx->common_res[tbl_type].default_stc;
+       if (--default_stc->refcount)
+               return;
+
+       mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->default_hit);
+       mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7);
+       mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);
+       mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
+       mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
+       kfree(default_stc);
+       ctx->common_res[tbl_type].default_stc = NULL;
+}
+
+/* Write modify-header action data into the argument object at @arg_idx.
+ * When @nope_locations has bits set, the user data is first expanded so
+ * that an extra (zeroed) slot follows each flagged action, matching the
+ * NOPE-padded pattern layout.
+ * NOTE(review): if the temporary expansion buffer cannot be allocated the
+ * write is silently skipped — callers cannot observe this failure.
+ */
+static void hws_action_modify_write(struct mlx5hws_send_engine *queue,
+                                   u32 arg_idx,
+                                   u8 *arg_data,
+                                   u16 num_of_actions,
+                                   u32 nope_locations)
+{
+       u8 *new_arg_data = NULL;
+       int i, j;
+
+       if (unlikely(nope_locations)) {
+               new_arg_data = kcalloc(num_of_actions,
+                                      MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
+               if (unlikely(!new_arg_data))
+                       return;
+
+               /* i walks the source actions, j the padded destination slots */
+               for (i = 0, j = 0; i < num_of_actions; i++, j++) {
+                       memcpy(&new_arg_data[j], arg_data, MLX5HWS_MODIFY_ACTION_SIZE);
+                       if (BIT(i) & nope_locations)
+                               j++;
+               }
+       }
+
+       mlx5hws_arg_write(queue, NULL, arg_idx,
+                         new_arg_data ? new_arg_data : arg_data,
+                         num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE);
+
+       kfree(new_arg_data);
+}
+
+/* Rebuild decap-L3 inline data: copy the L2 header from the end of @src
+ * into per-action inline slots in @dst (one 4-byte chunk per insert
+ * action), leaving the trailing 2 bytes after a 2-byte gap that a remove
+ * action later strips. @num_of_actions selects the VLAN vs. no-VLAN
+ * source header length.
+ */
+void mlx5hws_action_prepare_decap_l3_data(u8 *src, u8 *dst, u16 num_of_actions)
+{
+       u8 *e_src;
+       int i;
+
+       /* num_of_actions = remove l3l2 + 4/5 inserts + remove extra 2 bytes
+        * copy from end of src to the start of dst.
+        * move to the end, 2 is the leftover from 14B or 18B
+        */
+       if (num_of_actions == DECAP_L3_NUM_ACTIONS_W_NO_VLAN)
+               e_src = src + MLX5HWS_ACTION_HDR_LEN_L2;
+       else
+               e_src = src + MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN;
+
+       /* Move dst over the first remove action + zero data */
+       dst += MLX5HWS_ACTION_DOUBLE_SIZE;
+       /* Move dst over the first insert ctrl action */
+       dst += MLX5HWS_ACTION_DOUBLE_SIZE / 2;
+       /* Actions:
+        * no vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.
+        * with vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.
+        * the loop is without the last insertion.
+        */
+       for (i = 0; i < num_of_actions - 3; i++) {
+               e_src -= MLX5HWS_ACTION_INLINE_DATA_SIZE;
+               memcpy(dst, e_src, MLX5HWS_ACTION_INLINE_DATA_SIZE); /* data */
+               dst += MLX5HWS_ACTION_DOUBLE_SIZE;
+       }
+       /* Copy the last 2 bytes after a gap of 2 bytes which will be removed */
+       e_src -= MLX5HWS_ACTION_INLINE_DATA_SIZE / 2;
+       dst += MLX5HWS_ACTION_INLINE_DATA_SIZE / 2;
+       memcpy(dst, e_src, 2);
+}
+
+/* Return the STC-chunk offset of the shared STC of the given type. */
+static int
+hws_action_get_shared_stc_offset(struct mlx5hws_context_common_res *common_res,
+                                enum mlx5hws_context_shared_stc_type stc_type)
+{
+       return common_res->shared_stc[stc_type]->stc_chunk.offset;
+}
+
+/* Find the first setter (starting at @setter) whose flags do not collide
+ * with @req_flags; assumes the setter array has a free slot at the end.
+ */
+static struct mlx5hws_actions_wqe_setter *
+hws_action_setter_find_first(struct mlx5hws_actions_wqe_setter *setter,
+                            u8 req_flags)
+{
+       /* Use a new setter if requested flags are taken */
+       while (setter->flags & req_flags)
+               setter++;
+
+       /* Use the current setter if the required flags are not taken */
+       return setter;
+}
+
+/* Program the WQE control STC index @stc_idx from the rule action at
+ * @action_idx, using that action's STC offset for the current table type.
+ */
+static void
+hws_action_apply_stc(struct mlx5hws_actions_apply_data *apply,
+                    enum mlx5hws_action_stc_idx stc_idx,
+                    u8 action_idx)
+{
+       struct mlx5hws_action *action = apply->rule_action[action_idx].action;
+
+       apply->wqe_ctrl->stc_ix[stc_idx] =
+               htonl(action->stc[apply->tbl_type].offset);
+}
+
+/* Setter for push-VLAN: place the VLAN header in data DW7 (DW6 zeroed),
+ * point the DW6 STC at the push-VLAN action and clear the DW7 STC.
+ */
+static void
+hws_action_setter_push_vlan(struct mlx5hws_actions_apply_data *apply,
+                           struct mlx5hws_actions_wqe_setter *setter)
+{
+       struct mlx5hws_rule_action *rule_action;
+
+       rule_action = &apply->rule_action[setter->idx_double];
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = rule_action->push_vlan.vlan_hdr;
+
+       hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW6, setter->idx_double);
+       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+}
+
+/* Setter for modify-header: a single inline action embeds its data
+ * directly in DW7 (COPY/ADD_FIELD carry no inline data, so DW7 is
+ * zeroed); multi-action patterns put the argument-object offset in DW7
+ * and, for non-shared actions, write the per-rule data to the argument
+ * object through the queue, marking a WQE dependency.
+ */
+static void
+hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
+                               struct mlx5hws_actions_wqe_setter *setter)
+{
+       struct mlx5hws_rule_action *rule_action;
+       struct mlx5hws_action *action;
+       u32 arg_sz, arg_idx;
+       u8 *single_action;
+       __be32 stc_idx;
+
+       rule_action = &apply->rule_action[setter->idx_double];
+       action = rule_action->action;
+
+       stc_idx = htonl(action->stc[apply->tbl_type].offset);
+       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+
+       if (action->modify_header.num_of_actions == 1) {
+               if (action->modify_header.single_action_type ==
+                   MLX5_MODIFICATION_TYPE_COPY ||
+                   action->modify_header.single_action_type ==
+                   MLX5_MODIFICATION_TYPE_ADD_FIELD) {
+                       /* COPY/ADD_FIELD have no inline data word */
+                       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
+                       return;
+               }
+
+               /* Shared actions carry their own data; otherwise take it
+                * from the rule.
+                */
+               if (action->flags & MLX5HWS_ACTION_FLAG_SHARED)
+                       single_action = (u8 *)&action->modify_header.single_action;
+               else
+                       single_action = rule_action->modify_header.data;
+
+               apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] =
+                       *(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data);
+       } else {
+               /* Argument offset multiple with number of args per these actions */
+               arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
+               arg_idx = rule_action->modify_header.offset * arg_sz;
+
+               apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+               if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+                       apply->require_dep = 1;
+                       hws_action_modify_write(apply->queue,
+                                               action->modify_header.arg_id + arg_idx,
+                                               rule_action->modify_header.data,
+                                               action->modify_header.num_of_actions,
+                                               action->modify_header.nope_locations);
+               }
+       }
+}
+
+/* Setter for insert-header-with-pointer: DW7 carries the argument-object
+ * offset derived from the rule's reformat offset; non-shared actions also
+ * write the header data to the argument object, marking a WQE dependency.
+ * The header index selects the per-header entry in the action array.
+ */
+static void
+hws_action_setter_insert_ptr(struct mlx5hws_actions_apply_data *apply,
+                            struct mlx5hws_actions_wqe_setter *setter)
+{
+       struct mlx5hws_rule_action *rule_action;
+       struct mlx5hws_action *action;
+       u32 arg_idx, arg_sz;
+       __be32 stc_idx;
+
+       rule_action = &apply->rule_action[setter->idx_double];
+       action = rule_action->action + rule_action->reformat.hdr_idx;
+
+       /* Argument offset multiple on args required for header size */
+       arg_sz = mlx5hws_arg_data_size_to_arg_size(action->reformat.max_hdr_sz);
+       arg_idx = rule_action->reformat.offset * arg_sz;
+
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+       stc_idx = htonl(action->stc[apply->tbl_type].offset);
+       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+       if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+               apply->require_dep = 1;
+               mlx5hws_arg_write(apply->queue, NULL,
+                                 action->reformat.arg_id + arg_idx,
+                                 rule_action->reformat.data,
+                                 action->reformat.header_size);
+       }
+}
+
+/* Double-slot setter: tunnel L3-to-L2 decap, implemented via a modify
+ * header argument list. DW7 carries the argument index; for non-shared
+ * actions the decap-L3 action list is written through the queue
+ * (requires a dependency).
+ */
+static void
+hws_action_setter_tnl_l3_to_l2(struct mlx5hws_actions_apply_data *apply,
+                              struct mlx5hws_actions_wqe_setter *setter)
+{
+       struct mlx5hws_rule_action *rule_action;
+       struct mlx5hws_action *action;
+       u32 arg_sz, arg_idx;
+       __be32 stc_idx;
+
+       rule_action = &apply->rule_action[setter->idx_double];
+       /* Select the per-header action entry for this rule action */
+       action = rule_action->action + rule_action->reformat.hdr_idx;
+
+       /* Argument offset multiple on args required for num of actions */
+       arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
+       arg_idx = rule_action->reformat.offset * arg_sz;
+
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+       stc_idx = htonl(action->stc[apply->tbl_type].offset);
+       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+       if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+               /* Per-rule data: write the decap action list to arg memory */
+               apply->require_dep = 1;
+               mlx5hws_arg_decapl3_write(apply->queue,
+                                         action->modify_header.arg_id + arg_idx,
+                                         rule_action->reformat.data,
+                                         action->modify_header.num_of_actions);
+       }
+}
+
+/* Double-slot setter for ASO actions. Builds the ASO object offset (DW6)
+ * and exe_aso_ctrl (DW7) from the rule action. Only the ASO meter type
+ * is handled here; any other type logs an error and leaves the WQE
+ * data untouched.
+ */
+static void
+hws_action_setter_aso(struct mlx5hws_actions_apply_data *apply,
+                     struct mlx5hws_actions_wqe_setter *setter)
+{
+       struct mlx5hws_rule_action *rule_action;
+       u32 exe_aso_ctrl;
+       u32 offset;
+
+       rule_action = &apply->rule_action[setter->idx_double];
+
+       switch (rule_action->action->type) {
+       case MLX5HWS_ACTION_TYP_ASO_METER:
+               /* exe_aso_ctrl format:
+                * [STC only and reserved bits 29b][init_color 2b][meter_id 1b]
+                */
+               offset = rule_action->aso_meter.offset / MLX5_ASO_METER_NUM_PER_OBJ;
+               exe_aso_ctrl = rule_action->aso_meter.offset % MLX5_ASO_METER_NUM_PER_OBJ;
+               exe_aso_ctrl |= rule_action->aso_meter.init_color <<
+                               MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET;
+               break;
+       default:
+               mlx5hws_err(rule_action->action->ctx,
+                           "Unsupported ASO action type: %d\n", rule_action->action->type);
+               return;
+       }
+
+       /* aso_object_offset format: [24B] */
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = htonl(offset);
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(exe_aso_ctrl);
+
+       hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW6, setter->idx_double);
+       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+}
+
+/* Single-slot setter: write the flow tag value into DW5 and apply the
+ * corresponding STC index.
+ */
+static void
+hws_action_setter_tag(struct mlx5hws_actions_apply_data *apply,
+                     struct mlx5hws_actions_wqe_setter *setter)
+{
+       struct mlx5hws_rule_action *rule_action;
+
+       rule_action = &apply->rule_action[setter->idx_single];
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = htonl(rule_action->tag.value);
+       hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW5, setter->idx_single);
+}
+
+/* Counter setter: write the counter offset into DW0 (control) and apply
+ * the counter's STC index.
+ */
+static void
+hws_action_setter_ctrl_ctr(struct mlx5hws_actions_apply_data *apply,
+                          struct mlx5hws_actions_wqe_setter *setter)
+{
+       struct mlx5hws_rule_action *rule_action;
+
+       rule_action = &apply->rule_action[setter->idx_ctr];
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW0] = htonl(rule_action->counter.offset);
+       hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_CTRL, setter->idx_ctr);
+}
+
+/* Generic single-slot setter: no inline data (DW5 zeroed), only the
+ * action's STC index is applied.
+ */
+static void
+hws_action_setter_single(struct mlx5hws_actions_apply_data *apply,
+                        struct mlx5hws_actions_wqe_setter *setter)
+{
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+       hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW5, setter->idx_single);
+}
+
+/* Single-slot setter for a double VLAN pop: uses the shared
+ * "double pop" STC from the context common resources instead of a
+ * per-action STC.
+ */
+static void
+hws_action_setter_single_double_pop(struct mlx5hws_actions_apply_data *apply,
+                                   struct mlx5hws_actions_wqe_setter *setter)
+{
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+               htonl(hws_action_get_shared_stc_offset(apply->common_res,
+                                                      MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP));
+}
+
+/* Hit setter: zero the hit LSB and apply the hit action's STC index. */
+static void
+hws_action_setter_hit(struct mlx5hws_actions_apply_data *apply,
+                     struct mlx5hws_actions_wqe_setter *setter)
+{
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
+       hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_HIT, setter->idx_hit);
+}
+
+/* Default hit setter: used when the template has no explicit hit action;
+ * points the hit STC at the context's default-hit STC chunk.
+ */
+static void
+hws_action_setter_default_hit(struct mlx5hws_actions_apply_data *apply,
+                             struct mlx5hws_actions_wqe_setter *setter)
+{
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
+       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
+               htonl(apply->common_res->default_stc->default_hit.offset);
+}
+
+/* Hit setter used for chaining: jump to the next action STE of the same
+ * rule (next_direct_idx shifted into the hit LSB word).
+ */
+static void
+hws_action_setter_hit_next_action(struct mlx5hws_actions_apply_data *apply,
+                                 struct mlx5hws_actions_wqe_setter *setter)
+{
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = htonl(apply->next_direct_idx << 6);
+       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] = htonl(apply->jump_to_action_stc);
+}
+
+/* Single-slot setter using the shared decap-L3 STC from the context
+ * common resources (the remove part of an L2-to-tunnel-L3 reformat).
+ */
+static void
+hws_action_setter_common_decap(struct mlx5hws_actions_apply_data *apply,
+                              struct mlx5hws_actions_wqe_setter *setter)
+{
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+               htonl(hws_action_get_shared_stc_offset(apply->common_res,
+                                                      MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3));
+}
+
+/* Hit setter for range action: hit LSB forced to zero, STC taken from
+ * the range action.
+ */
+static void
+hws_action_setter_range(struct mlx5hws_actions_apply_data *apply,
+                       struct mlx5hws_actions_wqe_setter *setter)
+{
+       /* Always jump to index zero */
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
+       hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_HIT, setter->idx_hit);
+}
+
+/* Process an action template: walk the template's action list and assign
+ * each action to a WQE setter slot (single/double DW, counter, hit),
+ * then derive the number of action STEs the template needs and whether
+ * it is termination-only (no action DWs required).
+ *
+ * Idempotent: returns immediately if the template was already processed.
+ * Returns 0 on success, -EOPNOTSUPP on an unsupported action type.
+ */
+int mlx5hws_action_template_process(struct mlx5hws_action_template *at)
+{
+       struct mlx5hws_actions_wqe_setter *start_setter = at->setters + 1;
+       enum mlx5hws_action_type *action_type = at->action_type_arr;
+       struct mlx5hws_actions_wqe_setter *setter = at->setters;
+       struct mlx5hws_actions_wqe_setter *pop_setter = NULL;
+       struct mlx5hws_actions_wqe_setter *last_setter;
+       int i;
+
+       /* Note: Given action combination must be valid */
+
+       /* Check if action were already processed */
+       if (at->num_of_action_stes)
+               return 0;
+
+       /* Default every setter's hit to "jump to next action STE" */
+       for (i = 0; i < MLX5HWS_ACTION_MAX_STE; i++)
+               setter[i].set_hit = &hws_action_setter_hit_next_action;
+
+       /* The same action template setters can be used with jumbo or match
+        * STE, to support both cases we reserve the first setter for cases
+        * with jumbo STE to allow jump to the first action STE.
+        * This extra setter can be reduced in some cases on rule creation.
+        */
+       setter = start_setter;
+       last_setter = start_setter;
+
+       for (i = 0; i < at->num_actions; i++) {
+               switch (action_type[i]) {
+               case MLX5HWS_ACTION_TYP_DROP:
+               case MLX5HWS_ACTION_TYP_TBL:
+               case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+               case MLX5HWS_ACTION_TYP_VPORT:
+               case MLX5HWS_ACTION_TYP_MISS:
+                       /* Hit action */
+                       last_setter->flags |= ASF_HIT;
+                       last_setter->set_hit = &hws_action_setter_hit;
+                       last_setter->idx_hit = i;
+                       break;
+
+               case MLX5HWS_ACTION_TYP_RANGE:
+                       last_setter->flags |= ASF_HIT;
+                       last_setter->set_hit = &hws_action_setter_range;
+                       last_setter->idx_hit = i;
+                       break;
+
+               case MLX5HWS_ACTION_TYP_POP_VLAN:
+                       /* Single remove header to header */
+                       if (pop_setter) {
+                               /* We have 2 pops, use the shared */
+                               pop_setter->set_single = &hws_action_setter_single_double_pop;
+                               break;
+                       }
+                       setter = hws_action_setter_find_first(last_setter,
+                                                             ASF_SINGLE1 | ASF_MODIFY |
+                                                             ASF_INSERT);
+                       setter->flags |= ASF_SINGLE1 | ASF_REMOVE;
+                       setter->set_single = &hws_action_setter_single;
+                       setter->idx_single = i;
+                       pop_setter = setter;
+                       break;
+
+               case MLX5HWS_ACTION_TYP_PUSH_VLAN:
+                       /* Double insert inline */
+                       setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+                       setter->flags |= ASF_DOUBLE | ASF_INSERT;
+                       setter->set_double = &hws_action_setter_push_vlan;
+                       setter->idx_double = i;
+                       break;
+
+               case MLX5HWS_ACTION_TYP_MODIFY_HDR:
+                       /* Double modify header list */
+                       setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+                       setter->flags |= ASF_DOUBLE | ASF_MODIFY;
+                       setter->set_double = &hws_action_setter_modify_header;
+                       setter->idx_double = i;
+                       break;
+
+               case MLX5HWS_ACTION_TYP_ASO_METER:
+                       /* Double ASO action */
+                       setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE);
+                       setter->flags |= ASF_DOUBLE;
+                       setter->set_double = &hws_action_setter_aso;
+                       setter->idx_double = i;
+                       break;
+
+               case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
+               case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+                       /* Single remove header to header */
+                       setter = hws_action_setter_find_first(last_setter,
+                                                             ASF_SINGLE1 | ASF_MODIFY);
+                       setter->flags |= ASF_SINGLE1 | ASF_REMOVE;
+                       setter->set_single = &hws_action_setter_single;
+                       setter->idx_single = i;
+                       break;
+
+               case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+               case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+                       /* Double insert header with pointer */
+                       setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+                       setter->flags |= ASF_DOUBLE | ASF_INSERT;
+                       setter->set_double = &hws_action_setter_insert_ptr;
+                       setter->idx_double = i;
+                       break;
+
+               case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+                       /* Single remove + Double insert header with pointer */
+                       setter = hws_action_setter_find_first(last_setter,
+                                                             ASF_SINGLE1 | ASF_DOUBLE);
+                       setter->flags |= ASF_SINGLE1 | ASF_DOUBLE;
+                       setter->set_double = &hws_action_setter_insert_ptr;
+                       setter->idx_double = i;
+                       setter->set_single = &hws_action_setter_common_decap;
+                       setter->idx_single = i;
+                       break;
+
+               case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+                       /* Double modify header list with remove and push inline */
+                       setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+                       setter->flags |= ASF_DOUBLE | ASF_MODIFY | ASF_INSERT;
+                       setter->set_double = &hws_action_setter_tnl_l3_to_l2;
+                       setter->idx_double = i;
+                       break;
+
+               case MLX5HWS_ACTION_TYP_TAG:
+                       /* Single TAG action, search for any room from the start */
+                       setter = hws_action_setter_find_first(start_setter, ASF_SINGLE1);
+                       setter->flags |= ASF_SINGLE1;
+                       setter->set_single = &hws_action_setter_tag;
+                       setter->idx_single = i;
+                       break;
+
+               case MLX5HWS_ACTION_TYP_CTR:
+                       /* Control counter action
+                        * TODO: Current counter executed first. Support is needed
+                        *       for single action counter action which is done last.
+                        *       Example: Decap + CTR
+                        */
+                       setter = hws_action_setter_find_first(start_setter, ASF_CTR);
+                       setter->flags |= ASF_CTR;
+                       setter->set_ctr = &hws_action_setter_ctrl_ctr;
+                       setter->idx_ctr = i;
+                       break;
+               default:
+                       pr_warn("HWS: Invalid action type in processing action template: action_type[%d]=%d\n",
+                               i, action_type[i]);
+                       return -EOPNOTSUPP;
+               }
+
+               last_setter = max(setter, last_setter);
+       }
+
+       /* Set default hit on the last STE if no hit action provided */
+       if (!(last_setter->flags & ASF_HIT))
+               last_setter->set_hit = &hws_action_setter_default_hit;
+
+       at->num_of_action_stes = last_setter - start_setter + 1;
+
+       /* Check if action template doesn't require any action DWs */
+       at->only_term = (at->num_of_action_stes == 1) &&
+               !(last_setter->flags & ~(ASF_CTR | ASF_HIT));
+
+       return 0;
+}
+
+/* Allocate an action template from a MLX5HWS_ACTION_TYP_LAST-terminated
+ * action type array. The array is copied (including the terminator);
+ * num_actions excludes the terminator.
+ *
+ * NOTE(review): num_actions is a u8 counter and the input array is
+ * assumed to be properly terminated and shorter than 256 entries —
+ * presumably guaranteed by callers; verify.
+ *
+ * Returns the new template, or NULL on allocation failure.
+ */
+struct mlx5hws_action_template *
+mlx5hws_action_template_create(enum mlx5hws_action_type action_type[])
+{
+       struct mlx5hws_action_template *at;
+       u8 num_actions = 0;
+       int i;
+
+       at = kzalloc(sizeof(*at), GFP_KERNEL);
+       if (!at)
+               return NULL;
+
+       /* Count entries up to and including the LAST terminator */
+       while (action_type[num_actions++] != MLX5HWS_ACTION_TYP_LAST)
+               ;
+
+       at->num_actions = num_actions - 1;
+       at->action_type_arr = kcalloc(num_actions, sizeof(*action_type), GFP_KERNEL);
+       if (!at->action_type_arr)
+               goto free_at;
+
+       for (i = 0; i < num_actions; i++)
+               at->action_type_arr[i] = action_type[i];
+
+       return at;
+
+free_at:
+       kfree(at);
+       return NULL;
+}
+
+/* Free an action template and its copied action type array.
+ * Always returns 0.
+ */
+int mlx5hws_action_template_destroy(struct mlx5hws_action_template *at)
+{
+       kfree(at->action_type_arr);
+       kfree(at);
+       return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
new file mode 100644 (file)
index 0000000..e8f562c
--- /dev/null
@@ -0,0 +1,307 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_ACTION_H_
+#define HWS_ACTION_H_
+
+/* Max number of STEs needed for a rule (including match) */
+#define MLX5HWS_ACTION_MAX_STE 20
+
+/* Max number of internal subactions of ipv6_ext */
+#define MLX5HWS_ACTION_IPV6_EXT_MAX_SA 4
+
+/* Indexes of the STC entries within a STE's control segment, plus the
+ * last-used index for each supported STC combination.
+ */
+enum mlx5hws_action_stc_idx {
+       MLX5HWS_ACTION_STC_IDX_CTRL = 0,
+       MLX5HWS_ACTION_STC_IDX_HIT = 1,
+       MLX5HWS_ACTION_STC_IDX_DW5 = 2,
+       MLX5HWS_ACTION_STC_IDX_DW6 = 3,
+       MLX5HWS_ACTION_STC_IDX_DW7 = 4,
+       MLX5HWS_ACTION_STC_IDX_MAX = 5,
+       /* STC Jumbo STE combo: CTR, Hit */
+       MLX5HWS_ACTION_STC_IDX_LAST_JUMBO_STE = 1,
+       /* STC combo1: CTR, SINGLE, DOUBLE, Hit */
+       MLX5HWS_ACTION_STC_IDX_LAST_COMBO1 = 3,
+       /* STC combo2: CTR, 3 x SINGLE, Hit */
+       MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 = 4,
+       /* STC combo3: CTR, TRIPLE, Hit */
+       MLX5HWS_ACTION_STC_IDX_LAST_COMBO3 = 2,
+};
+
+/* Dword offsets of action data within the WQE data segment. */
+enum mlx5hws_action_offset {
+       MLX5HWS_ACTION_OFFSET_DW0 = 0,
+       MLX5HWS_ACTION_OFFSET_DW5 = 5,
+       MLX5HWS_ACTION_OFFSET_DW6 = 6,
+       MLX5HWS_ACTION_OFFSET_DW7 = 7,
+       MLX5HWS_ACTION_OFFSET_HIT = 3,
+       MLX5HWS_ACTION_OFFSET_HIT_LSB = 4,
+};
+
+/* Action data sizes and L2 header-length constants (bytes). */
+enum {
+       MLX5HWS_ACTION_DOUBLE_SIZE = 8,
+       MLX5HWS_ACTION_INLINE_DATA_SIZE = 4,
+       MLX5HWS_ACTION_HDR_LEN_L2_MACS = 12,
+       MLX5HWS_ACTION_HDR_LEN_L2_VLAN = 4,
+       MLX5HWS_ACTION_HDR_LEN_L2_ETHER = 2,
+       MLX5HWS_ACTION_HDR_LEN_L2 = (MLX5HWS_ACTION_HDR_LEN_L2_MACS +
+                                    MLX5HWS_ACTION_HDR_LEN_L2_ETHER),
+       MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN = (MLX5HWS_ACTION_HDR_LEN_L2 +
+                                           MLX5HWS_ACTION_HDR_LEN_L2_VLAN),
+       MLX5HWS_ACTION_REFORMAT_DATA_SIZE = 64,
+       /* Number of modify-header actions needed for decap L3 */
+       DECAP_L3_NUM_ACTIONS_W_NO_VLAN = 6,
+       DECAP_L3_NUM_ACTIONS_W_VLAN = 7,
+};
+
+/* Occupancy/type flags of a WQE setter slot. SINGLE1..3 mark the three
+ * single-action slots; DOUBLE consumes slots 2+3, TRIPLE all three.
+ * INSERT/REMOVE/MODIFY/CTR/HIT mark the kind of action placed there.
+ */
+enum mlx5hws_action_setter_flag {
+       ASF_SINGLE1 = 1 << 0,
+       ASF_SINGLE2 = 1 << 1,
+       ASF_SINGLE3 = 1 << 2,
+       ASF_DOUBLE = ASF_SINGLE2 | ASF_SINGLE3,
+       ASF_TRIPLE = ASF_SINGLE1 | ASF_DOUBLE,
+       ASF_INSERT = 1 << 3,
+       ASF_REMOVE = 1 << 4,
+       ASF_MODIFY = 1 << 5,
+       ASF_CTR = 1 << 6,
+       ASF_HIT = 1 << 7,
+};
+
+/* Refcounted set of default (no-op and default-hit) STC chunks shared
+ * across rules of a context.
+ */
+struct mlx5hws_action_default_stc {
+       struct mlx5hws_pool_chunk nop_ctr;
+       struct mlx5hws_pool_chunk nop_dw5;
+       struct mlx5hws_pool_chunk nop_dw6;
+       struct mlx5hws_pool_chunk nop_dw7;
+       struct mlx5hws_pool_chunk default_hit;
+       u32 refcount;
+};
+
+/* Refcounted shared STC chunk (e.g. double-pop, decap-L3). */
+struct mlx5hws_action_shared_stc {
+       struct mlx5hws_pool_chunk stc_chunk;
+       u32 refcount;
+};
+
+/* Per-apply context passed to the setter callbacks while building a
+ * rule's WQE: the send queue, rule actions, WQE data/control pointers,
+ * and bookkeeping for hit chaining and dependent writes.
+ */
+struct mlx5hws_actions_apply_data {
+       struct mlx5hws_send_engine *queue;
+       struct mlx5hws_rule_action *rule_action;
+       __be32 *wqe_data;
+       struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+       u32 jump_to_action_stc;
+       struct mlx5hws_context_common_res *common_res;
+       enum mlx5hws_table_type tbl_type;
+       u32 next_direct_idx;
+       u8 require_dep; /* set by setters that queue a dependent write */
+};
+
+struct mlx5hws_actions_wqe_setter;
+
+/* Callback that writes one action's data/STC into the WQE being built */
+typedef void (*mlx5hws_action_setter_fp)(struct mlx5hws_actions_apply_data *apply,
+                                        struct mlx5hws_actions_wqe_setter *setter);
+
+/* One action-STE slot of an action template: the setter callbacks to
+ * run for it and the indexes of the rule actions they consume.
+ */
+struct mlx5hws_actions_wqe_setter {
+       mlx5hws_action_setter_fp set_single;
+       mlx5hws_action_setter_fp set_double;
+       mlx5hws_action_setter_fp set_triple;
+       mlx5hws_action_setter_fp set_hit;
+       mlx5hws_action_setter_fp set_ctr;
+       u8 idx_single;
+       u8 idx_double;
+       u8 idx_triple;
+       u8 idx_ctr;
+       u8 idx_hit;
+       u8 stage_idx;
+       u8 flags; /* ASF_* occupancy flags */
+};
+
+/* Processed action template: setter slots, the copied action type array,
+ * the number of action STEs required, and whether the template is
+ * termination-only (no action DWs).
+ */
+struct mlx5hws_action_template {
+       struct mlx5hws_actions_wqe_setter setters[MLX5HWS_ACTION_MAX_STE];
+       enum mlx5hws_action_type *action_type_arr;
+       u8 num_of_action_stes;
+       u8 num_actions;
+       u8 only_term;
+};
+
+/* An HWS action object: type/flags, owning context, and per-type data.
+ * Most action types carry per-table-type STC chunks plus a type-specific
+ * union member.
+ */
+struct mlx5hws_action {
+       u8 type;
+       u8 flags;
+       struct mlx5hws_context *ctx;
+       union {
+               struct {
+                       struct mlx5hws_pool_chunk stc[MLX5HWS_TABLE_TYPE_MAX];
+                       union {
+                               /* Modify-header (also used for decap L3) */
+                               struct {
+                                       u32 pat_id;
+                                       u32 arg_id;
+                                       __be64 single_action;
+                                       u32 nope_locations;
+                                       u8 num_of_patterns;
+                                       u8 single_action_type;
+                                       u8 num_of_actions;
+                                       u8 max_num_of_actions;
+                                       u8 require_reparse;
+                               } modify_header;
+                               /* Packet reformat (encap/decap/insert) */
+                               struct {
+                                       u32 arg_id;
+                                       u32 header_size;
+                                       u16 max_hdr_sz;
+                                       u8 num_of_hdrs;
+                                       u8 anchor;
+                                       u8 e_anchor;
+                                       u8 offset;
+                                       bool encap;
+                                       u8 require_reparse;
+                               } reformat;
+                               struct {
+                                       u32 obj_id;
+                                       u8 return_reg_id;
+                               } aso;
+                               struct {
+                                       u16 vport_num;
+                                       u16 esw_owner_vhca_id;
+                                       bool esw_owner_vhca_id_valid;
+                               } vport;
+                               struct {
+                                       u32 obj_id;
+                               } dest_obj;
+                               struct {
+                                       struct mlx5hws_cmd_forward_tbl *fw_island;
+                                       size_t num_dest;
+                                       struct mlx5hws_cmd_set_fte_dest *dest_list;
+                               } dest_array;
+                               struct {
+                                       u8 type;
+                                       u8 start_anchor;
+                                       u8 end_anchor;
+                                       u8 num_of_words;
+                                       bool decap;
+                               } insert_hdr;
+                               struct {
+                                       /* PRM start anchor from which header will be removed */
+                                       u8 anchor;
+                                       /* Header remove offset in bytes, from the start
+                                        * anchor to the location where remove header starts.
+                                        */
+                                       u8 offset;
+                                       /* Indicates the removed header size in bytes */
+                                       size_t size;
+                               } remove_header;
+                               struct {
+                                       struct mlx5hws_matcher_action_ste *table_ste;
+                                       struct mlx5hws_action *hit_ft_action;
+                                       struct mlx5hws_definer *definer;
+                               } range;
+                       };
+               };
+
+               /* NOTE(review): ibv_* are userspace rdma-core types — these
+                * members look like leftovers from the userspace HWS port;
+                * verify whether they are used anywhere in the kernel code.
+                */
+               struct ibv_flow_action *flow_action;
+               u32 obj_id;
+               struct ibv_qp *qp;
+       };
+};
+
+/* Human-readable name of an action type */
+const char *mlx5hws_action_type_to_str(enum mlx5hws_action_type action_type);
+
+/* Get/put the refcounted default STC set for a table type */
+int mlx5hws_action_get_default_stc(struct mlx5hws_context *ctx,
+                                  u8 tbl_type);
+
+void mlx5hws_action_put_default_stc(struct mlx5hws_context *ctx,
+                                   u8 tbl_type);
+
+/* Convert raw decap-L3 header data into the modify-header action layout */
+void mlx5hws_action_prepare_decap_l3_data(u8 *src, u8 *dst,
+                                         u16 num_of_actions);
+
+/* Assign the template's actions to setter slots; see the .c for details */
+int mlx5hws_action_template_process(struct mlx5hws_action_template *at);
+
+/* Validate that an action combination is supported for the table type */
+bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx,
+                               enum mlx5hws_action_type *user_actions,
+                               enum mlx5hws_table_type table_type);
+
+/* Allocate/free a single STC chunk for the given table type */
+int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx,
+                                   struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+                                   u32 table_type,
+                                   struct mlx5hws_pool_chunk *stc);
+
+void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx,
+                                   u32 table_type,
+                                   struct mlx5hws_pool_chunk *stc);
+
+/* Fill the single slot with the no-op DW5 STC when no single action
+ * was set.
+ */
+static inline void
+mlx5hws_action_setter_default_single(struct mlx5hws_actions_apply_data *apply,
+                                    struct mlx5hws_actions_wqe_setter *setter)
+{
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+               htonl(apply->common_res->default_stc->nop_dw5.offset);
+}
+
+/* Fill the double slot with the no-op DW6/DW7 STCs when no double action
+ * was set.
+ */
+static inline void
+mlx5hws_action_setter_default_double(struct mlx5hws_actions_apply_data *apply,
+                                    struct mlx5hws_actions_wqe_setter *setter)
+{
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
+       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] =
+               htonl(apply->common_res->default_stc->nop_dw6.offset);
+       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] =
+               htonl(apply->common_res->default_stc->nop_dw7.offset);
+}
+
+/* Fill the counter slot with the no-op counter STC when no counter
+ * action was set.
+ */
+static inline void
+mlx5hws_action_setter_default_ctr(struct mlx5hws_actions_apply_data *apply,
+                                 struct mlx5hws_actions_wqe_setter *setter)
+{
+       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW0] = 0;
+       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] =
+               htonl(apply->common_res->default_stc->nop_ctr.offset);
+}
+
+/* Run one setter slot against the apply context: counter first, then
+ * single/double (or triple) action data for non-jumbo STEs, then the
+ * hit action. Unset callbacks fall back to the no-op defaults. Jumbo
+ * STEs carry no action DWs, so DW5-DW7 are zeroed. Finally the number
+ * of actions is encoded into the top bits of the control STC word
+ * (shifted by 29, i.e. the 3 MSBs).
+ */
+static inline void
+mlx5hws_action_apply_setter(struct mlx5hws_actions_apply_data *apply,
+                           struct mlx5hws_actions_wqe_setter *setter,
+                           bool is_jumbo)
+{
+       u8 num_of_actions;
+
+       /* Set control counter */
+       if (setter->set_ctr)
+               setter->set_ctr(apply, setter);
+       else
+               mlx5hws_action_setter_default_ctr(apply, setter);
+
+       if (!is_jumbo) {
+               if (unlikely(setter->set_triple)) {
+                       /* Set triple on match */
+                       setter->set_triple(apply, setter);
+                       num_of_actions = MLX5HWS_ACTION_STC_IDX_LAST_COMBO3;
+               } else {
+                       /* Set single and double on match */
+                       if (setter->set_single)
+                               setter->set_single(apply, setter);
+                       else
+                               mlx5hws_action_setter_default_single(apply, setter);
+
+                       if (setter->set_double)
+                               setter->set_double(apply, setter);
+                       else
+                               mlx5hws_action_setter_default_double(apply, setter);
+
+                       num_of_actions = setter->set_double ?
+                               MLX5HWS_ACTION_STC_IDX_LAST_COMBO1 :
+                               MLX5HWS_ACTION_STC_IDX_LAST_COMBO2;
+               }
+       } else {
+               /* Jumbo STE: no action DWs available */
+               apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+               apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+               apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
+               apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] = 0;
+               apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = 0;
+               apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+               num_of_actions = MLX5HWS_ACTION_STC_IDX_LAST_JUMBO_STE;
+       }
+
+       /* Set next/final hit action */
+       setter->set_hit(apply, setter);
+
+       /* Set number of actions */
+       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
+               htonl(num_of_actions << 29);
+}
+
+#endif /* HWS_ACTION_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.c
new file mode 100644 (file)
index 0000000..b9aef80
--- /dev/null
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+#include "buddy.h"
+
+/* Initialize a buddy allocator covering 2^max_order entries: allocate
+ * one free-bitmap and free-counter per order, then mark the single
+ * top-order block as free.
+ * Returns 0 on success, -ENOMEM on allocation failure (all partial
+ * allocations are released).
+ */
+static int hws_buddy_init(struct mlx5hws_buddy_mem *buddy, u32 max_order)
+{
+       int i, s, ret = 0;
+
+       buddy->max_order = max_order;
+
+       buddy->bitmap = kcalloc(buddy->max_order + 1,
+                               sizeof(*buddy->bitmap),
+                               GFP_KERNEL);
+       if (!buddy->bitmap)
+               return -ENOMEM;
+
+       buddy->num_free = kcalloc(buddy->max_order + 1,
+                                 sizeof(*buddy->num_free),
+                                 GFP_KERNEL);
+       if (!buddy->num_free) {
+               ret = -ENOMEM;
+               goto err_out_free_bits;
+       }
+
+       /* Order i has 2^(max_order - i) possible blocks */
+       for (i = 0; i <= (int)buddy->max_order; ++i) {
+               s = 1 << (buddy->max_order - i);
+
+               buddy->bitmap[i] = bitmap_zalloc(s, GFP_KERNEL);
+               if (!buddy->bitmap[i]) {
+                       ret = -ENOMEM;
+                       goto err_out_free_num_free;
+               }
+       }
+
+       /* Initially the whole range is one free top-order block */
+       bitmap_set(buddy->bitmap[buddy->max_order], 0, 1);
+       buddy->num_free[buddy->max_order] = 1;
+
+       return 0;
+
+err_out_free_num_free:
+       /* bitmap_free(NULL) is a no-op, so freeing all orders is safe */
+       for (i = 0; i <= (int)buddy->max_order; ++i)
+               bitmap_free(buddy->bitmap[i]);
+
+       kfree(buddy->num_free);
+
+err_out_free_bits:
+       kfree(buddy->bitmap);
+       return ret;
+}
+
+/* Allocate and initialize a buddy allocator of 2^max_order entries.
+ * Returns the new allocator, or NULL on failure.
+ */
+struct mlx5hws_buddy_mem *mlx5hws_buddy_create(u32 max_order)
+{
+       struct mlx5hws_buddy_mem *buddy;
+
+       buddy = kzalloc(sizeof(*buddy), GFP_KERNEL);
+       if (!buddy)
+               return NULL;
+
+       if (hws_buddy_init(buddy, max_order))
+               goto free_buddy;
+
+       return buddy;
+
+free_buddy:
+       kfree(buddy);
+       return NULL;
+}
+
+/* Release the internal bitmaps and counters of a buddy allocator.
+ * NOTE(review): the buddy struct itself is not freed here — presumably
+ * the caller kfree()s it; verify against callers.
+ */
+void mlx5hws_buddy_cleanup(struct mlx5hws_buddy_mem *buddy)
+{
+       int i;
+
+       for (i = 0; i <= (int)buddy->max_order; ++i)
+               bitmap_free(buddy->bitmap[i]);
+
+       kfree(buddy->num_free);
+       kfree(buddy->bitmap);
+}
+
+/* Find the smallest free block of order >= start_order. On success
+ * returns 0 and sets *segment to the free block's bit index and *order
+ * to the order it was found at. Returns -ENOMEM when no free block
+ * exists (or, with a WARN, when the free counter disagrees with the
+ * bitmap).
+ */
+static int hws_buddy_find_free_seg(struct mlx5hws_buddy_mem *buddy,
+                                  u32 start_order,
+                                  u32 *segment,
+                                  u32 *order)
+{
+       unsigned int seg, order_iter, m;
+
+       for (order_iter = start_order;
+            order_iter <= buddy->max_order; ++order_iter) {
+               if (!buddy->num_free[order_iter])
+                       continue;
+
+               m = 1 << (buddy->max_order - order_iter);
+               seg = find_first_bit(buddy->bitmap[order_iter], m);
+
+               /* num_free said there is a free block; the bitmap must agree */
+               if (WARN(seg >= m,
+                        "ICM Buddy: failed finding free mem for order %d\n",
+                        order_iter))
+                       return -ENOMEM;
+
+               break;
+       }
+
+       /* Loop ran to completion without a break: nothing free */
+       if (order_iter > buddy->max_order)
+               return -ENOMEM;
+
+       *segment = seg;
+       *order = order_iter;
+       return 0;
+}
+
+/* Allocate a block of 2^order entries from the buddy allocator.
+ * Finds the smallest free block of sufficient order, then repeatedly
+ * splits it down to the requested order, marking the buddy of each
+ * split half as free.
+ * Returns the segment start index (in entries) on success, or a
+ * negative errno on failure.
+ */
+int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order)
+{
+       u32 seg, order_iter;
+       int err;
+
+       err = hws_buddy_find_free_seg(buddy, order, &seg, &order_iter);
+       if (err)
+               return err;
+
+       bitmap_clear(buddy->bitmap[order_iter], seg, 1);
+       --buddy->num_free[order_iter];
+
+       /* Split down to the requested order, freeing each buddy half */
+       while (order_iter > order) {
+               --order_iter;
+               seg <<= 1;
+               bitmap_set(buddy->bitmap[order_iter], seg ^ 1, 1);
+               ++buddy->num_free[order_iter];
+       }
+
+       /* Convert the order-relative index to an entry index */
+       seg <<= order;
+
+       return seg;
+}
+
+/* Return a block of 2^order entries starting at seg to the allocator,
+ * coalescing with its buddy at each order while the buddy is also free.
+ */
+void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order)
+{
+       /* Convert the entry index to an order-relative index */
+       seg >>= order;
+
+       /* Merge with the free buddy (seg ^ 1) as far up as possible */
+       while (test_bit(seg ^ 1, buddy->bitmap[order])) {
+               bitmap_clear(buddy->bitmap[order], seg ^ 1, 1);
+               --buddy->num_free[order];
+               seg >>= 1;
+               ++order;
+       }
+
+       bitmap_set(buddy->bitmap[order], seg, 1);
+       ++buddy->num_free[order];
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.h
new file mode 100644 (file)
index 0000000..ef6b223
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_BUDDY_H_
+#define HWS_BUDDY_H_
+
+/* Binary buddy allocator state: one free-bit bitmap and a free-block
+ * counter per order, up to max_order.
+ */
+struct mlx5hws_buddy_mem {
+       unsigned long **bitmap; /* bitmap[order]: set bit = free block */
+       unsigned int *num_free; /* num_free[order]: count of free blocks */
+       u32 max_order;          /* highest supported allocation order */
+};
+
+/* Create a buddy allocator covering 2^max_order segments */
+struct mlx5hws_buddy_mem *mlx5hws_buddy_create(u32 max_order);
+
+/* Free the allocator and all its tracking structures */
+void mlx5hws_buddy_cleanup(struct mlx5hws_buddy_mem *buddy);
+
+/* Allocate 2^order segments; returns segment index or negative errno */
+int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order);
+
+/* Return 2^order segments starting at 'seg' back to the allocator */
+void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order);
+
+#endif /* HWS_BUDDY_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
new file mode 100644 (file)
index 0000000..baacf66
--- /dev/null
@@ -0,0 +1,995 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+/* Pick a random BWC queue index to spread rule insertions across queues */
+static u16 hws_bwc_gen_queue_idx(struct mlx5hws_context *ctx)
+{
+       u16 num_queues = mlx5hws_bwc_queues(ctx);
+
+       /* NOTE(review): get_random_u8() only yields 0..255 — confirm the
+        * number of BWC queues can't exceed 256, or higher queues are
+        * never selected.
+        */
+       return get_random_u8() % num_queues;
+}
+
+/* Burst threshold for polling completions on a queue: half the queue
+ * depth, capped by the rehash burst threshold.
+ */
+static u16
+hws_bwc_get_burst_th(struct mlx5hws_context *ctx, u16 queue_id)
+{
+       return min(ctx->send_queue[queue_id].num_entries / 2,
+                  MLX5HWS_BWC_MATCHER_REHASH_BURST_TH);
+}
+
+/* Return the per-queue mutex protecting BWC send queue 'idx' */
+static struct mutex *
+hws_bwc_get_queue_lock(struct mlx5hws_context *ctx, u16 idx)
+{
+       return &ctx->bwc_send_queue_locks[idx];
+}
+
+/* Acquire every BWC queue lock, in ascending index order, to get
+ * exclusive access for matcher-wide operations (rehash, AT extension).
+ */
+static void hws_bwc_lock_all_queues(struct mlx5hws_context *ctx)
+{
+       u16 num_queues = mlx5hws_bwc_queues(ctx);
+       int q;
+
+       for (q = 0; q < num_queues; q++)
+               mutex_lock(hws_bwc_get_queue_lock(ctx, q));
+}
+
+/* Release every BWC queue lock in reverse of the acquisition order */
+static void hws_bwc_unlock_all_queues(struct mlx5hws_context *ctx)
+{
+       u16 num_queues = mlx5hws_bwc_queues(ctx);
+       int q;
+
+       for (q = num_queues - 1; q >= 0; q--)
+               mutex_unlock(hws_bwc_get_queue_lock(ctx, q));
+}
+
+/* Fill matcher attributes for a BWC matcher: hash-based insertion and
+ * distribution, resizable, with room for attached action templates.
+ * All fields not listed are zeroed.
+ */
+static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr,
+                                     u32 priority,
+                                     u8 size_log)
+{
+       *attr = (struct mlx5hws_matcher_attr) {
+               .priority = priority,
+               .optimize_using_rule_idx = 0,
+               .mode = MLX5HWS_MATCHER_RESOURCE_MODE_RULE,
+               .optimize_flow_src = MLX5HWS_MATCHER_FLOW_SRC_ANY,
+               .insert_mode = MLX5HWS_MATCHER_INSERT_BY_HASH,
+               .distribute_mode = MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH,
+               .rule.num_log = size_log,
+               .resizable = true,
+               .max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM,
+       };
+}
+
+/* Initialize a (non-complex) BWC matcher: per-queue rule lists, an
+ * initial (possibly dummy) action template, the match template, and the
+ * underlying HWS matcher. Returns 0 on success, negative errno on error.
+ */
+int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
+                                     struct mlx5hws_table *table,
+                                     u32 priority,
+                                     u8 match_criteria_enable,
+                                     struct mlx5hws_match_parameters *mask,
+                                     enum mlx5hws_action_type action_types[])
+{
+       enum mlx5hws_action_type init_action_types[1] = { MLX5HWS_ACTION_TYP_LAST };
+       struct mlx5hws_context *ctx = table->ctx;
+       u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+       struct mlx5hws_matcher_attr attr = {0};
+       int ret = -EINVAL;
+       int i;
+
+       /* One rule list per BWC queue, so each queue's rules can be
+        * tracked (and moved on rehash) independently.
+        */
+       bwc_matcher->rules = kcalloc(bwc_queues, sizeof(*bwc_matcher->rules), GFP_KERNEL);
+       if (!bwc_matcher->rules) {
+               /* Fix: allocation failure is -ENOMEM, not -EINVAL */
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       for (i = 0; i < bwc_queues; i++)
+               INIT_LIST_HEAD(&bwc_matcher->rules[i]);
+
+       hws_bwc_matcher_init_attr(&attr,
+                                 priority,
+                                 MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG);
+
+       bwc_matcher->priority = priority;
+       bwc_matcher->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
+
+       /* create dummy action template */
+       bwc_matcher->at[0] =
+               mlx5hws_action_template_create(action_types ?
+                                              action_types : init_action_types);
+       if (!bwc_matcher->at[0]) {
+               mlx5hws_err(table->ctx, "BWC matcher: failed creating action template\n");
+               goto free_bwc_matcher_rules;
+       }
+
+       bwc_matcher->num_of_at = 1;
+
+       bwc_matcher->mt = mlx5hws_match_template_create(ctx,
+                                                       mask->match_buf,
+                                                       mask->match_sz,
+                                                       match_criteria_enable);
+       if (!bwc_matcher->mt) {
+               mlx5hws_err(table->ctx, "BWC matcher: failed creating match template\n");
+               goto free_at;
+       }
+
+       bwc_matcher->matcher = mlx5hws_matcher_create(table,
+                                                     &bwc_matcher->mt, 1,
+                                                     &bwc_matcher->at[0],
+                                                     bwc_matcher->num_of_at,
+                                                     &attr);
+       if (!bwc_matcher->matcher) {
+               mlx5hws_err(table->ctx, "BWC matcher: failed creating HWS matcher\n");
+               goto free_mt;
+       }
+
+       return 0;
+
+free_mt:
+       mlx5hws_match_template_destroy(bwc_matcher->mt);
+free_at:
+       mlx5hws_action_template_destroy(bwc_matcher->at[0]);
+free_bwc_matcher_rules:
+       kfree(bwc_matcher->rules);
+err:
+       return ret;
+}
+
+/* Create a BWC (backward-compatible) matcher on the given table.
+ * Dispatches to the complex path when the match params don't fit in a
+ * single STE. Returns the matcher, or NULL on any failure.
+ */
+struct mlx5hws_bwc_matcher *
+mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
+                          u32 priority,
+                          u8 match_criteria_enable,
+                          struct mlx5hws_match_parameters *mask)
+{
+       struct mlx5hws_bwc_matcher *bwc_matcher;
+       int err;
+
+       if (!mlx5hws_context_bwc_supported(table->ctx)) {
+               mlx5hws_err(table->ctx,
+                           "BWC matcher: context created w/o BWC API compatibility\n");
+               return NULL;
+       }
+
+       bwc_matcher = kzalloc(sizeof(*bwc_matcher), GFP_KERNEL);
+       if (!bwc_matcher)
+               return NULL;
+
+       /* Match params that can't all fit in a single STE require the
+        * complex-matcher path.
+        */
+       if (mlx5hws_bwc_match_params_is_complex(table->ctx, match_criteria_enable, mask))
+               err = mlx5hws_bwc_matcher_create_complex(bwc_matcher,
+                                                        table,
+                                                        priority,
+                                                        match_criteria_enable,
+                                                        mask);
+       else
+               err = mlx5hws_bwc_matcher_create_simple(bwc_matcher,
+                                                       table,
+                                                       priority,
+                                                       match_criteria_enable,
+                                                       mask,
+                                                       NULL);
+       if (err) {
+               kfree(bwc_matcher);
+               return NULL;
+       }
+
+       return bwc_matcher;
+}
+
+/* Tear down a simple BWC matcher in reverse order of creation:
+ * matcher, action templates, match template, rule lists.
+ */
+int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+       int at_idx;
+
+       mlx5hws_matcher_destroy(bwc_matcher->matcher);
+       bwc_matcher->matcher = NULL;
+
+       for (at_idx = 0; at_idx < bwc_matcher->num_of_at; at_idx++)
+               mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
+
+       mlx5hws_match_template_destroy(bwc_matcher->mt);
+       kfree(bwc_matcher->rules);
+
+       return 0;
+}
+
+/* Destroy a BWC matcher and free its memory. A matcher that still has
+ * rules indicates a caller bug: log it, but proceed with destruction.
+ */
+int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+       u32 num_of_rules = bwc_matcher->num_of_rules;
+
+       if (num_of_rules)
+               mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
+                           "BWC matcher destroy: matcher still has %d rules\n",
+                           num_of_rules);
+
+       mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+       kfree(bwc_matcher);
+       return 0;
+}
+
+/* Poll a send queue for completions of pending BWC rule operations.
+ *
+ * @pending_rules: in/out count of operations still awaiting completion;
+ *                 decremented per completion received.
+ * @drain: when true, keep polling until *pending_rules reaches zero;
+ *         when false, poll only if at least a burst's worth is pending.
+ *
+ * Returns 0 on success, -EINVAL on a poll failure or a completion that
+ * carries an error status.
+ */
+static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+                             u16 queue_id,
+                             u32 *pending_rules,
+                             bool drain)
+{
+       struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH];
+       u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id);
+       bool got_comp = *pending_rules >= burst_th;
+       bool queue_full;
+       int err = 0;
+       int ret;
+       int i;
+
+       /* Check if there are any completions at all */
+       if (!got_comp && !drain)
+               return 0;
+
+       /* A full queue forces polling even below the burst threshold,
+        * so that new operations can be posted.
+        */
+       queue_full = mlx5hws_send_engine_full(&ctx->send_queue[queue_id]);
+       while (queue_full || ((got_comp || drain) && *pending_rules)) {
+               ret = mlx5hws_send_queue_poll(ctx, queue_id, comp, burst_th);
+               if (unlikely(ret < 0)) {
+                       mlx5hws_err(ctx, "BWC poll error: polling queue %d returned %d\n",
+                                   queue_id, ret);
+                       return -EINVAL;
+               }
+
+               if (ret) {
+                       (*pending_rules) -= ret;
+                       /* Record (but don't abort on) failed completions, so
+                        * the remaining pending ops are still accounted for.
+                        */
+                       for (i = 0; i < ret; i++) {
+                               if (unlikely(comp[i].status != MLX5HWS_FLOW_OP_SUCCESS)) {
+                                       mlx5hws_err(ctx,
+                                                   "BWC poll error: polling queue %d returned completion with error\n",
+                                                   queue_id);
+                                       err = -EINVAL;
+                               }
+                       }
+                       queue_full = false;
+               }
+
+               got_comp = !!ret;
+       }
+
+       return err;
+}
+
+/* Build the rule attributes used for all BWC rule operations on the
+ * given queue: hash-based insertion, immediate HW notification.
+ */
+void
+mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
+                          u16 bwc_queue_idx,
+                          u32 flow_source,
+                          struct mlx5hws_rule_attr *rule_attr)
+{
+       struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+
+       rule_attr->queue_id = mlx5hws_bwc_get_queue_id(ctx, bwc_queue_idx);
+       rule_attr->flow_source = flow_source;
+
+       /* BWC rules are inserted by hash; INSERT_BY_INDEX is never used */
+       rule_attr->rule_idx = 0;
+
+       /* burst == 0: notify HW on every rule insertion/deletion */
+       rule_attr->burst = 0;
+
+       /* The async API requires non-NULL user data, though BWC ignores it */
+       rule_attr->user_data = (void *)0xFACADE;
+}
+
+/* Allocate a BWC rule together with its underlying HWS rule object.
+ * Returns NULL on allocation failure.
+ */
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+       struct mlx5hws_bwc_rule *bwc_rule;
+
+       bwc_rule = kzalloc(sizeof(*bwc_rule), GFP_KERNEL);
+       if (unlikely(!bwc_rule))
+               return NULL;
+
+       bwc_rule->rule = kzalloc(sizeof(*bwc_rule->rule), GFP_KERNEL);
+       if (unlikely(!bwc_rule->rule)) {
+               kfree(bwc_rule);
+               return NULL;
+       }
+
+       bwc_rule->bwc_matcher = bwc_matcher;
+       return bwc_rule;
+}
+
+/* Free a BWC rule and its underlying HWS rule object */
+void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule)
+{
+       /* kfree(NULL) is a no-op; the NULL guard was redundant */
+       kfree(bwc_rule->rule);
+       kfree(bwc_rule);
+}
+
+/* Track a newly inserted rule on its queue's rule list and bump the
+ * matcher's rule count. Caller must hold that queue's lock.
+ */
+static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx)
+{
+       struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+
+       bwc_matcher->num_of_rules++;
+       bwc_rule->bwc_queue_idx = idx;
+       list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]);
+}
+
+/* Untrack a rule from its queue's list and decrement the matcher's rule
+ * count. Caller must hold that queue's lock.
+ */
+static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule)
+{
+       struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+
+       bwc_matcher->num_of_rules--;
+       list_del_init(&bwc_rule->list_node);
+}
+
+/* Post an asynchronous destroy of the underlying HWS rule; the caller
+ * is responsible for polling its completion.
+ */
+static int
+hws_bwc_rule_destroy_hws_async(struct mlx5hws_bwc_rule *bwc_rule,
+                              struct mlx5hws_rule_attr *attr)
+{
+       return mlx5hws_rule_destroy(bwc_rule->rule, attr);
+}
+
+/* Destroy the underlying HWS rule synchronously: post the async destroy
+ * and busy-poll the queue until its completion arrives.
+ */
+static int
+hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
+                             struct mlx5hws_rule_attr *rule_attr)
+{
+       struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
+       struct mlx5hws_flow_op_result completion;
+       int ret;
+
+       ret = hws_bwc_rule_destroy_hws_async(bwc_rule, rule_attr);
+       if (unlikely(ret))
+               return ret;
+
+       do {
+               ret = mlx5hws_send_queue_poll(ctx, rule_attr->queue_id, &completion, 1);
+               /* Fix: a negative poll result previously kept this loop
+                * spinning forever; bail out on poll error instead.
+                */
+               if (unlikely(ret < 0)) {
+                       mlx5hws_err(ctx,
+                                   "Failed destroying BWC rule: polling queue %d returned %d\n",
+                                   rule_attr->queue_id, ret);
+                       return ret;
+               }
+       } while (ret != 1);
+
+       if (unlikely(completion.status != MLX5HWS_FLOW_OP_SUCCESS ||
+                    (bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
+                     bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING))) {
+               mlx5hws_err(ctx, "Failed destroying BWC rule: completion %d, rule status %d\n",
+                           completion.status, bwc_rule->rule->status);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Destroy a BWC rule on the queue it was created on, under that queue's
+ * lock, and remove it from the matcher's tracking list.
+ */
+int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
+{
+       struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+       struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+       u16 queue_idx = bwc_rule->bwc_queue_idx;
+       struct mlx5hws_rule_attr rule_attr;
+       struct mutex *queue_lock; /* Protect the queue */
+       int ret;
+
+       mlx5hws_bwc_rule_fill_attr(bwc_matcher, queue_idx, 0, &rule_attr);
+
+       queue_lock = hws_bwc_get_queue_lock(ctx, queue_idx);
+       mutex_lock(queue_lock);
+
+       ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &rule_attr);
+       /* Remove from the tracking list even if the HW destroy failed */
+       hws_bwc_rule_list_remove(bwc_rule);
+
+       mutex_unlock(queue_lock);
+
+       return ret;
+}
+
+/* Destroy a BWC rule and free its memory. The memory is released even
+ * if the HW destroy reported an error.
+ */
+int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule)
+{
+       int ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
+
+       mlx5hws_bwc_rule_free(bwc_rule);
+       return ret;
+}
+
+/* Post an asynchronous creation of the underlying HWS rule; the caller
+ * is responsible for polling its completion.
+ */
+static int
+hws_bwc_rule_create_async(struct mlx5hws_bwc_rule *bwc_rule,
+                         u32 *match_param,
+                         u8 at_idx,
+                         struct mlx5hws_rule_action rule_actions[],
+                         struct mlx5hws_rule_attr *rule_attr)
+{
+       return mlx5hws_rule_create(bwc_rule->bwc_matcher->matcher,
+                                  0, /* only one match template supported */
+                                  match_param,
+                                  at_idx,
+                                  rule_actions,
+                                  rule_attr,
+                                  bwc_rule->rule);
+}
+
+/* Create the underlying HWS rule synchronously: post the async create
+ * and drain the queue until its completion arrives.
+ */
+static int
+hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule *bwc_rule,
+                        u32 *match_param,
+                        u8 at_idx,
+                        struct mlx5hws_rule_action rule_actions[],
+                        struct mlx5hws_rule_attr *rule_attr)
+{
+       struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
+       u32 expected_completions = 1;
+       int ret;
+
+       ret = hws_bwc_rule_create_async(bwc_rule, match_param,
+                                       at_idx, rule_actions,
+                                       rule_attr);
+       if (unlikely(ret))
+               return ret;
+
+       return hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+                                 &expected_completions, true);
+}
+
+/* Update a rule's actions synchronously: post the async action update
+ * and drain the queue until its completion arrives.
+ */
+static int
+hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule,
+                        u8 at_idx,
+                        struct mlx5hws_rule_action rule_actions[],
+                        struct mlx5hws_rule_attr *rule_attr)
+{
+       struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
+       u32 expected_completions = 1;
+       int ret;
+
+       ret = mlx5hws_rule_action_update(bwc_rule->rule, at_idx,
+                                        rule_actions, rule_attr);
+       if (unlikely(ret))
+               return ret;
+
+       ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+                                &expected_completions, true);
+       if (unlikely(ret))
+               mlx5hws_err(ctx, "Failed updating BWC rule (%d)\n", ret);
+
+       return ret;
+}
+
+/* True when growing the matcher further would exceed the device's STE
+ * allocation limit (accounting for the assured main table depth).
+ */
+static bool
+hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+       struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
+       u32 depth = bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH;
+
+       return depth >= caps->ste_alloc_log_max - 1;
+}
+
+/* Decide whether the matcher should be rehashed to a bigger size:
+ * occupancy has crossed MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH percent
+ * of the table, and the matcher isn't already at its maximum size.
+ */
+static bool
+hws_bwc_matcher_rehash_size_needed(struct mlx5hws_bwc_matcher *bwc_matcher,
+                                  u32 num_of_rules)
+{
+       if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher)))
+               return false;
+
+       /* NOTE(review): num_of_rules * 100 is 32-bit arithmetic and would
+        * wrap above ~42M rules — confirm table sizes keep this in range.
+        */
+       return (num_of_rules * 100 / MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH) >=
+              (1UL << bwc_matcher->size_log);
+}
+
+/* Translate a rule's action array into a TYP_LAST-terminated array of
+ * action types, suitable for creating an action template.
+ */
+static void
+hws_bwc_rule_actions_to_action_types(struct mlx5hws_rule_action rule_actions[],
+                                    enum mlx5hws_action_type action_types[])
+{
+       int n = 0;
+
+       while (rule_actions[n].action &&
+              rule_actions[n].action->type != MLX5HWS_ACTION_TYP_LAST) {
+               action_types[n] = (enum mlx5hws_action_type)rule_actions[n].action->type;
+               n++;
+       }
+
+       /* Terminate the list */
+       action_types[n] = MLX5HWS_ACTION_TYP_LAST;
+}
+
+/* Derive an action template from the given rule actions and append it
+ * to the matcher's action template array. Caller must hold all queue
+ * locks. Returns 0 on success, -ENOMEM on failure.
+ */
+static int
+hws_bwc_matcher_extend_at(struct mlx5hws_bwc_matcher *bwc_matcher,
+                         struct mlx5hws_rule_action rule_actions[])
+{
+       enum mlx5hws_action_type action_types[MLX5HWS_BWC_MAX_ACTS];
+       struct mlx5hws_action_template *at;
+
+       hws_bwc_rule_actions_to_action_types(rule_actions, action_types);
+
+       at = mlx5hws_action_template_create(action_types);
+       if (unlikely(!at))
+               return -ENOMEM;
+
+       bwc_matcher->at[bwc_matcher->num_of_at++] = at;
+       return 0;
+}
+
+/* Grow the matcher's size_log by one rehash step, clamped so the
+ * assured main table depth still fits under the device's max STE
+ * allocation. Returns -ENOMEM if already at the maximum size.
+ */
+static int
+hws_bwc_matcher_extend_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+       struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+       struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+       if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher))) {
+               mlx5hws_err(ctx, "Can't resize matcher: depth exceeds limit %d\n",
+                           caps->rtc_log_depth_max);
+               return -ENOMEM;
+       }
+
+       bwc_matcher->size_log =
+               min(bwc_matcher->size_log + MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
+                   caps->ste_alloc_log_max - MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH);
+
+       return 0;
+}
+
+/* Find an existing action template whose action-type sequence matches
+ * the given rule actions exactly (same types, same length).
+ * Returns the template index, or -1 if none matches.
+ */
+static int
+hws_bwc_matcher_find_at(struct mlx5hws_bwc_matcher *bwc_matcher,
+                       struct mlx5hws_rule_action rule_actions[])
+{
+       enum mlx5hws_action_type *action_type_arr;
+       int i, j;
+
+       /* start from index 1 - first action template is a dummy */
+       for (i = 1; i < bwc_matcher->num_of_at; i++) {
+               j = 0;
+               action_type_arr = bwc_matcher->at[i]->action_type_arr;
+
+               /* Walk both sequences until a mismatch or either ends */
+               while (rule_actions[j].action &&
+                      rule_actions[j].action->type != MLX5HWS_ACTION_TYP_LAST) {
+                       if (action_type_arr[j] != rule_actions[j].action->type)
+                               break;
+                       j++;
+               }
+
+               /* A match requires both sequences to terminate at position j */
+               if (action_type_arr[j] == MLX5HWS_ACTION_TYP_LAST &&
+                   (!rule_actions[j].action ||
+                    rule_actions[j].action->type == MLX5HWS_ACTION_TYP_LAST))
+                       return i;
+       }
+
+       return -1;
+}
+
+/* Move all rules of a matcher to its resize-target matcher.
+ *
+ * Rules are moved round-robin across all BWC queues, a burst at a time
+ * per queue, interleaving posts with completion polling so no queue
+ * overflows. Caller must hold all queue locks. Returns 0 on success,
+ * negative errno on failure.
+ */
+static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+       struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+       u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+       struct mlx5hws_bwc_rule **bwc_rules;
+       struct mlx5hws_rule_attr rule_attr;
+       u32 *pending_rules;
+       int i, j, ret = 0;
+       bool all_done;
+       u16 burst_th;
+
+       mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
+
+       /* Per-queue count of move operations awaiting completion */
+       pending_rules = kcalloc(bwc_queues, sizeof(*pending_rules), GFP_KERNEL);
+       if (!pending_rules)
+               return -ENOMEM;
+
+       /* Per-queue cursor into that queue's rule list; NULL = exhausted */
+       bwc_rules = kcalloc(bwc_queues, sizeof(*bwc_rules), GFP_KERNEL);
+       if (!bwc_rules) {
+               ret = -ENOMEM;
+               goto free_pending_rules;
+       }
+
+       for (i = 0; i < bwc_queues; i++) {
+               if (list_empty(&bwc_matcher->rules[i]))
+                       bwc_rules[i] = NULL;
+               else
+                       bwc_rules[i] = list_first_entry(&bwc_matcher->rules[i],
+                                                       struct mlx5hws_bwc_rule,
+                                                       list_node);
+       }
+
+       do {
+               all_done = true;
+
+               for (i = 0; i < bwc_queues; i++) {
+                       rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+                       burst_th = hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
+
+                       for (j = 0; j < burst_th && bwc_rules[i]; j++) {
+                               /* Ring the HW doorbell on the last post of each burst */
+                               rule_attr.burst = !!((j + 1) % burst_th);
+                               ret = mlx5hws_matcher_resize_rule_move(bwc_matcher->matcher,
+                                                                      bwc_rules[i]->rule,
+                                                                      &rule_attr);
+                               if (unlikely(ret)) {
+                                       mlx5hws_err(ctx,
+                                                   "Moving BWC rule failed during rehash (%d)\n",
+                                                   ret);
+                                       goto free_bwc_rules;
+                               }
+
+                               all_done = false;
+                               pending_rules[i]++;
+                               /* Advance the cursor; NULL marks end of this queue's list */
+                               bwc_rules[i] = list_is_last(&bwc_rules[i]->list_node,
+                                                           &bwc_matcher->rules[i]) ?
+                                              NULL : list_next_entry(bwc_rules[i], list_node);
+
+                               /* Opportunistic (non-draining) completion reaping */
+                               ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id,
+                                                        &pending_rules[i], false);
+                               if (unlikely(ret))
+                                       goto free_bwc_rules;
+                       }
+               }
+       } while (!all_done);
+
+       /* drain all the bwc queues */
+       for (i = 0; i < bwc_queues; i++) {
+               if (pending_rules[i]) {
+                       u16 queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+
+                       mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]);
+                       ret = hws_bwc_queue_poll(ctx, queue_id,
+                                                &pending_rules[i], true);
+                       if (unlikely(ret))
+                               goto free_bwc_rules;
+               }
+       }
+
+free_bwc_rules:
+       kfree(bwc_rules);
+free_pending_rules:
+       kfree(pending_rules);
+
+       return ret;
+}
+
+/* Move all rules to the resize-target matcher (simple matchers only
+ * in this path).
+ */
+static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+       return hws_bwc_matcher_move_all_simple(bwc_matcher);
+}
+
+/* Rehash a matcher: create a new matcher with the current attributes
+ * (size/templates), move all rules into it, then destroy the old one.
+ * Caller must hold all queue locks.
+ */
+static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+       struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+       struct mlx5hws_matcher_attr matcher_attr = {0};
+       struct mlx5hws_matcher *old_matcher;
+       struct mlx5hws_matcher *new_matcher;
+       int ret;
+
+       hws_bwc_matcher_init_attr(&matcher_attr,
+                                 bwc_matcher->priority,
+                                 bwc_matcher->size_log);
+
+       old_matcher = bwc_matcher->matcher;
+       new_matcher = mlx5hws_matcher_create(old_matcher->tbl,
+                                            &bwc_matcher->mt, 1,
+                                            bwc_matcher->at,
+                                            bwc_matcher->num_of_at,
+                                            &matcher_attr);
+       if (!new_matcher) {
+               mlx5hws_err(ctx, "Rehash error: matcher creation failed\n");
+               return -ENOMEM;
+       }
+
+       ret = mlx5hws_matcher_resize_set_target(old_matcher, new_matcher);
+       if (ret) {
+               mlx5hws_err(ctx, "Rehash error: failed setting resize target\n");
+               /* Fix: the new matcher was leaked on this path; nothing
+                * references it yet, so destroy it before returning.
+                */
+               mlx5hws_matcher_destroy(new_matcher);
+               return ret;
+       }
+
+       ret = hws_bwc_matcher_move_all(bwc_matcher);
+       if (ret) {
+               mlx5hws_err(ctx, "Rehash error: moving rules failed\n");
+               /* NOTE(review): on a mid-move failure, rules may be split
+                * between old and new matchers; destroying either would
+                * strand rules. Preserving previous behavior (no cleanup)
+                * until a proper recovery path exists.
+                */
+               return -ENOMEM;
+       }
+
+       bwc_matcher->matcher = new_matcher;
+       mlx5hws_matcher_destroy(old_matcher);
+
+       return 0;
+}
+
+/* Rehash the matcher to a larger size if still needed.
+ * Caller must hold all queue locks. Returns 0 if rehash was done or
+ * turned out to be unnecessary, negative errno on failure.
+ */
+static int
+hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+       int ret;
+
+       /* If the current matcher size is already at its max size, we can't
+        * do the rehash. Skip it and try adding the rule again - perhaps
+        * there was some change.
+        */
+       if (hws_bwc_matcher_size_maxed_out(bwc_matcher))
+               return 0;
+
+       /* It is possible that other rule has already performed rehash.
+        * Need to check again if we really need rehash.
+        * If the reason for rehash was size, but not any more - skip rehash.
+        */
+       if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, bwc_matcher->num_of_rules))
+               return 0;
+
+       /* Now we're done all the checking - do the rehash:
+        *  - extend match RTC size
+        *  - create new matcher
+        *  - move all the rules to the new matcher
+        *  - destroy the old matcher
+        */
+
+       ret = hws_bwc_matcher_extend_size(bwc_matcher);
+       if (ret)
+               return ret;
+
+       return hws_bwc_matcher_move(bwc_matcher);
+}
+
+/* Rehash the matcher after its action template array was extended.
+ * Caller must hold all queue locks.
+ */
+static int
+hws_bwc_matcher_rehash_at(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+       /* Rehash by action template doesn't require any additional checking.
+        * The bwc_matcher already contains the new action template.
+        * Just do the usual rehash:
+        *  - create new matcher
+        *  - move all the rules to the new matcher
+        *  - destroy the old matcher
+        */
+       return hws_bwc_matcher_move(bwc_matcher);
+}
+
+/* Insert a rule into a (non-complex) BWC matcher.
+ *
+ * Holds the per-queue lock for the fast path; drops it and takes all
+ * queue locks for matcher-wide operations (extending action templates,
+ * rehash by size). The lock/unlock/relock ordering below is deliberate:
+ * the queue lock is always released before taking all queue locks to
+ * avoid deadlock. Returns 0 on success, negative errno on failure.
+ */
+int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
+                                  u32 *match_param,
+                                  struct mlx5hws_rule_action rule_actions[],
+                                  u32 flow_source,
+                                  u16 bwc_queue_idx)
+{
+       struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+       struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+       struct mlx5hws_rule_attr rule_attr;
+       struct mutex *queue_lock; /* Protect the queue */
+       u32 num_of_rules;
+       int ret = 0;
+       int at_idx;
+
+       mlx5hws_bwc_rule_fill_attr(bwc_matcher, bwc_queue_idx, flow_source, &rule_attr);
+
+       queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+
+       mutex_lock(queue_lock);
+
+       /* check if rehash needed due to missing action template */
+       at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+       if (unlikely(at_idx < 0)) {
+               /* we need to extend BWC matcher action templates array */
+               mutex_unlock(queue_lock);
+               hws_bwc_lock_all_queues(ctx);
+
+               ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
+               if (unlikely(ret)) {
+                       hws_bwc_unlock_all_queues(ctx);
+                       return ret;
+               }
+
+               /* action templates array was extended, we need the last idx */
+               at_idx = bwc_matcher->num_of_at - 1;
+
+               ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
+                                               bwc_matcher->at[at_idx]);
+               if (unlikely(ret)) {
+                       /* Action template attach failed, possibly due to
+                        * requiring more action STEs.
+                        * Need to attempt creating new matcher with all
+                        * the action templates, including the new one.
+                        */
+                       ret = hws_bwc_matcher_rehash_at(bwc_matcher);
+                       if (unlikely(ret)) {
+                               /* Rehash failed too - roll back the new AT */
+                               mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
+                               bwc_matcher->at[at_idx] = NULL;
+                               bwc_matcher->num_of_at--;
+
+                               hws_bwc_unlock_all_queues(ctx);
+
+                               mlx5hws_err(ctx,
+                                           "BWC rule insertion: rehash AT failed (%d)\n", ret);
+                               return ret;
+                       }
+               }
+
+               hws_bwc_unlock_all_queues(ctx);
+               mutex_lock(queue_lock);
+       }
+
+       /* check if number of rules require rehash */
+       num_of_rules = bwc_matcher->num_of_rules;
+
+       if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
+               mutex_unlock(queue_lock);
+
+               hws_bwc_lock_all_queues(ctx);
+               ret = hws_bwc_matcher_rehash_size(bwc_matcher);
+               hws_bwc_unlock_all_queues(ctx);
+
+               if (ret) {
+                       mlx5hws_err(ctx, "BWC rule insertion: rehash size [%d -> %d] failed (%d)\n",
+                                   bwc_matcher->size_log - MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
+                                   bwc_matcher->size_log,
+                                   ret);
+                       return ret;
+               }
+
+               mutex_lock(queue_lock);
+       }
+
+       ret = hws_bwc_rule_create_sync(bwc_rule,
+                                      match_param,
+                                      at_idx,
+                                      rule_actions,
+                                      &rule_attr);
+       if (likely(!ret)) {
+               hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
+               mutex_unlock(queue_lock);
+               return 0; /* rule inserted successfully */
+       }
+
+       /* At this point the rule wasn't added.
+        * It could be because there was collision, or some other problem.
+        * If we don't dive deeper than API, the only thing we know is that
+        * the status of completion is RTE_FLOW_OP_ERROR.
+        * Try rehash by size and insert rule again - last chance.
+        */
+
+       mutex_unlock(queue_lock);
+
+       hws_bwc_lock_all_queues(ctx);
+       ret = hws_bwc_matcher_rehash_size(bwc_matcher);
+       hws_bwc_unlock_all_queues(ctx);
+
+       if (ret) {
+               mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret);
+               return ret;
+       }
+
+       /* Rehash done, but we still have that pesky rule to add */
+       mutex_lock(queue_lock);
+
+       ret = hws_bwc_rule_create_sync(bwc_rule,
+                                      match_param,
+                                      at_idx,
+                                      rule_actions,
+                                      &rule_attr);
+
+       if (unlikely(ret)) {
+               mutex_unlock(queue_lock);
+               mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret);
+               return ret;
+       }
+
+       hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
+       mutex_unlock(queue_lock);
+
+       return 0;
+}
+
+/* Create a rule through the backward-compatible (BWC) API.
+ * Verifies that the context was created with BWC support, allocates
+ * the rule, selects a BWC queue for it and inserts it synchronously.
+ * Returns the new rule on success, NULL on failure.
+ */
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+                       struct mlx5hws_match_parameters *params,
+                       u32 flow_source,
+                       struct mlx5hws_rule_action rule_actions[])
+{
+       struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+       struct mlx5hws_bwc_rule *bwc_rule;
+       u16 bwc_queue_idx;
+       int ret;
+
+       if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
+               mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
+               return NULL;
+       }
+
+       bwc_rule = mlx5hws_bwc_rule_alloc(bwc_matcher);
+       if (unlikely(!bwc_rule))
+               return NULL;
+
+       /* Pick which BWC queue this rule will go through */
+       bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);
+
+       ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
+                                            params->match_buf,
+                                            rule_actions,
+                                            flow_source,
+                                            bwc_queue_idx);
+       if (unlikely(ret)) {
+               mlx5hws_bwc_rule_free(bwc_rule);
+               return NULL;
+       }
+
+       return bwc_rule;
+}
+
+/* Update the actions of an existing BWC rule.
+ * If the new actions require an action template (AT) that the matcher
+ * doesn't have yet, the AT array is extended under the "all queues"
+ * lock (with a re-check, since another thread may have extended it
+ * first), possibly rehashing the matcher if attaching the new AT
+ * fails. The rule itself is then updated synchronously under its
+ * queue lock.
+ */
+static int
+hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+                          struct mlx5hws_rule_action rule_actions[])
+{
+       struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+       struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+       struct mlx5hws_rule_attr rule_attr;
+       struct mutex *queue_lock; /* Protect the queue */
+       int at_idx, ret;
+       u16 idx;
+
+       idx = bwc_rule->bwc_queue_idx;
+
+       mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &rule_attr);
+       queue_lock = hws_bwc_get_queue_lock(ctx, idx);
+
+       mutex_lock(queue_lock);
+
+       /* check if rehash needed due to missing action template */
+       at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+       if (unlikely(at_idx < 0)) {
+               /* we need to extend BWC matcher action templates array */
+               mutex_unlock(queue_lock);
+               hws_bwc_lock_all_queues(ctx);
+
+               /* check again - perhaps other thread already did extend_at */
+               at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+               if (likely(at_idx < 0)) {
+                       ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
+                       if (unlikely(ret)) {
+                               hws_bwc_unlock_all_queues(ctx);
+                               mlx5hws_err(ctx, "BWC rule update: failed extending AT (%d)\n", ret);
+                               /* NOTE(review): the specific error from extend_at
+                                * is discarded here - confirm -EINVAL is intended
+                                */
+                               return -EINVAL;
+                       }
+
+                       /* action templates array was extended, we need the last idx */
+                       at_idx = bwc_matcher->num_of_at - 1;
+
+                       ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
+                                                       bwc_matcher->at[at_idx]);
+                       if (unlikely(ret)) {
+                               /* Action template attach failed, possibly due to
+                                * requiring more action STEs.
+                                * Need to attempt creating new matcher with all
+                                * the action templates, including the new one.
+                                */
+                               ret = hws_bwc_matcher_rehash_at(bwc_matcher);
+                               if (unlikely(ret)) {
+                                       /* Rehash failed - roll back the AT we added */
+                                       mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
+                                       bwc_matcher->at[at_idx] = NULL;
+                                       bwc_matcher->num_of_at--;
+
+                                       hws_bwc_unlock_all_queues(ctx);
+
+                                       mlx5hws_err(ctx,
+                                                   "BWC rule update: rehash AT failed (%d)\n",
+                                                   ret);
+                                       return ret;
+                               }
+                       }
+               }
+
+               hws_bwc_unlock_all_queues(ctx);
+               mutex_lock(queue_lock);
+       }
+
+       ret = hws_bwc_rule_update_sync(bwc_rule,
+                                      at_idx,
+                                      rule_actions,
+                                      &rule_attr);
+       mutex_unlock(queue_lock);
+
+       if (unlikely(ret))
+               mlx5hws_err(ctx, "BWC rule: update failed (%d)\n", ret);
+
+       return ret;
+}
+
+/* Public entry point for updating a BWC rule's actions.
+ * Rejects contexts that were not created with BWC API compatibility,
+ * then delegates to the internal update routine.
+ */
+int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+                                  struct mlx5hws_rule_action rule_actions[])
+{
+       struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+       struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+
+       if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
+               mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
+               return -EINVAL;
+       }
+
+       return hws_bwc_rule_action_update(bwc_rule, rule_actions);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
new file mode 100644 (file)
index 0000000..0b74596
--- /dev/null
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_BWC_H_
+#define HWS_BWC_H_
+
+#define MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG 1
+#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1
+#define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70
+#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32
+#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 255
+
+#define MLX5HWS_BWC_MAX_ACTS 16
+
+/* State of a backward-compatible (BWC) matcher: wraps an HWS matcher
+ * together with its match template and an array of action templates.
+ */
+struct mlx5hws_bwc_matcher {
+       struct mlx5hws_matcher *matcher;
+       struct mlx5hws_match_template *mt;
+       /* Action templates attached to this matcher, num_of_at in use */
+       struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM];
+       u8 num_of_at;
+       u16 priority;
+       u8 size_log; /* log2 of the current matcher size */
+       u32 num_of_rules; /* atomically accessed */
+       struct list_head *rules; /* rule lists; presumably one per BWC queue - TODO confirm */
+};
+
+/* A rule created through the BWC API */
+struct mlx5hws_bwc_rule {
+       struct mlx5hws_bwc_matcher *bwc_matcher;
+       struct mlx5hws_rule *rule;
+       u16 bwc_queue_idx; /* BWC queue the rule was inserted through */
+       struct list_head list_node; /* membership in the matcher's rule list */
+};
+
+int
+mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
+                                 struct mlx5hws_table *table,
+                                 u32 priority,
+                                 u8 match_criteria_enable,
+                                 struct mlx5hws_match_parameters *mask,
+                                 enum mlx5hws_action_type action_types[]);
+
+int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+struct mlx5hws_bwc_rule *mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule);
+
+int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
+                                  u32 *match_param,
+                                  struct mlx5hws_rule_action rule_actions[],
+                                  u32 flow_source,
+                                  u16 bwc_queue_idx);
+
+int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule);
+
+void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
+                               u16 bwc_queue_idx,
+                               u32 flow_source,
+                               struct mlx5hws_rule_attr *rule_attr);
+
+/* Number of queues reserved for the BWC API */
+static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
+{
+       /* Besides the control queue, half of the queues are
+        * regular HWS queues, and the other half are BWC queues.
+        */
+       return (ctx->queues - 1) / 2;
+}
+
+/* Map a BWC queue index to its HWS queue ID:
+ * BWC queues occupy the upper half of the queue range.
+ */
+static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
+{
+       return idx + mlx5hws_bwc_queues(ctx);
+}
+
+#endif /* HWS_BWC_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
new file mode 100644 (file)
index 0000000..c00010c
--- /dev/null
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+/* Check whether the given match parameters are too big to fit into
+ * a single definer, i.e. require a "complex" matcher.
+ * Builds a temporary match template and tries to calculate a definer
+ * layout for it; -E2BIG from the layout calculation is the signal
+ * that a complex matcher is needed.
+ */
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+                                        u8 match_criteria_enable,
+                                        struct mlx5hws_match_parameters *mask)
+{
+       struct mlx5hws_definer match_layout = {0};
+       struct mlx5hws_match_template *mt;
+       bool is_complex = false;
+       int ret;
+
+       if (!match_criteria_enable)
+               return false; /* empty matcher */
+
+       mt = mlx5hws_match_template_create(ctx,
+                                          mask->match_buf,
+                                          mask->match_sz,
+                                          match_criteria_enable);
+       if (!mt) {
+               mlx5hws_err(ctx, "BWC: failed creating match template\n");
+               return false;
+       }
+
+       ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
+       if (ret) {
+               /* The only case that we're interested in is E2BIG,
+                * which means that the match parameters need to be
+                * split into complex matcher.
+                * For all other cases (good or bad) - just return true
+                * and let the usual match creation path handle it,
+                * both for good and bad flows.
+                */
+               if (ret == -E2BIG) {
+                       is_complex = true;
+                       mlx5hws_dbg(ctx, "Matcher definer layout: need complex matcher\n");
+               } else {
+                       mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
+               }
+       }
+
+       /* Temporary template was only needed for the layout check */
+       mlx5hws_match_template_destroy(mt);
+
+       return is_complex;
+}
+
+/* Placeholder: complex matcher creation is not implemented yet */
+int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
+                                      struct mlx5hws_table *table,
+                                      u32 priority,
+                                      u8 match_criteria_enable,
+                                      struct mlx5hws_match_parameters *mask)
+{
+       mlx5hws_err(table->ctx, "Complex matcher is not supported yet\n");
+       return -EOPNOTSUPP;
+}
+
+/* Placeholder: complex matchers can't be created, so no cleanup needed */
+void
+mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+       /* nothing to do here */
+}
+
+/* Placeholder: rule creation on a complex matcher is not implemented yet */
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+                                   struct mlx5hws_match_parameters *params,
+                                   u32 flow_source,
+                                   struct mlx5hws_rule_action rule_actions[],
+                                   u16 bwc_queue_idx)
+{
+       mlx5hws_err(bwc_rule->bwc_matcher->matcher->tbl->ctx,
+                   "Complex rule is not supported yet\n");
+       return -EOPNOTSUPP;
+}
+
+/* Placeholder: complex rules can't be created, so destroy is a no-op */
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule)
+{
+       return 0;
+}
+
+/* Placeholder: moving rules of a complex matcher is not implemented yet */
+int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+       mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
+                   "Moving complex rule is not supported yet\n");
+       return -EOPNOTSUPP;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
new file mode 100644 (file)
index 0000000..340f068
--- /dev/null
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_BWC_COMPLEX_H_
+#define HWS_BWC_COMPLEX_H_
+
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+                                        u8 match_criteria_enable,
+                                        struct mlx5hws_match_parameters *mask);
+
+int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
+                                      struct mlx5hws_table *table,
+                                      u32 priority,
+                                      u8 match_criteria_enable,
+                                      struct mlx5hws_match_parameters *mask);
+
+void mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+                                   struct mlx5hws_match_parameters *params,
+                                   u32 flow_source,
+                                   struct mlx5hws_rule_action rule_actions[],
+                                   u16 bwc_queue_idx);
+
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule);
+
+#endif /* HWS_BWC_COMPLEX_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
new file mode 100644 (file)
index 0000000..c00c138
--- /dev/null
@@ -0,0 +1,1300 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+/* Translate a flow destination type to the FW (ifc) destination type.
+ * Unsupported types are warned about and mapped to 0.
+ */
+static enum mlx5_ifc_flow_destination_type
+hws_cmd_dest_type_to_ifc_dest_type(enum mlx5_flow_destination_type type)
+{
+       switch (type) {
+       case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+               return MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
+       case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+               return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+       case MLX5_FLOW_DESTINATION_TYPE_TIR:
+               return MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
+       case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+               return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
+       case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
+               return MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
+       case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE:
+               return MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+       case MLX5_FLOW_DESTINATION_TYPE_NONE:
+       case MLX5_FLOW_DESTINATION_TYPE_PORT:
+       case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
+       case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+       case MLX5_FLOW_DESTINATION_TYPE_RANGE:
+       default:
+               pr_warn("HWS: unknown flow dest type %d\n", type);
+               return 0;
+       }
+}
+
+/* Destroy a FW general object of the given type/id via the
+ * DESTROY_GENERAL_OBJECT command.
+ */
+static int hws_cmd_general_obj_destroy(struct mlx5_core_dev *mdev,
+                                      u32 object_type,
+                                      u32 object_id)
+{
+       u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+       MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, object_type);
+       MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, object_id);
+
+       return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+/* Create a flow table with the given attributes.
+ * On success, returns 0 and stores the new table ID in @table_id.
+ */
+int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev,
+                                 struct mlx5hws_cmd_ft_create_attr *ft_attr,
+                                 u32 *table_id)
+{
+       u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
+       u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
+       void *ft_ctx;
+       int ret;
+
+       MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
+       MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type);
+
+       ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
+       MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
+       MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);
+       MLX5_SET(flow_table_context, ft_ctx, reformat_en, ft_attr->reformat_en);
+       MLX5_SET(flow_table_context, ft_ctx, decap_en, ft_attr->decap_en);
+
+       ret = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
+       if (ret)
+               return ret;
+
+       *table_id = MLX5_GET(create_flow_table_out, out, table_id);
+
+       return 0;
+}
+
+/* Modify an existing flow table (miss action/id and RTC bindings),
+ * according to the field-select mask in @ft_attr->modify_fs.
+ */
+int mlx5hws_cmd_flow_table_modify(struct mlx5_core_dev *mdev,
+                                 struct mlx5hws_cmd_ft_modify_attr *ft_attr,
+                                 u32 table_id)
+{
+       u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
+       void *ft_ctx;
+
+       MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE);
+       MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type);
+       MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs);
+       MLX5_SET(modify_flow_table_in, in, table_id, table_id);
+
+       ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context);
+
+       MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action);
+       MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id);
+       MLX5_SET(flow_table_context, ft_ctx, hws.rtc_id_0, ft_attr->rtc_id_0);
+       MLX5_SET(flow_table_context, ft_ctx, hws.rtc_id_1, ft_attr->rtc_id_1);
+
+       return mlx5_cmd_exec_in(mdev, modify_flow_table, in);
+}
+
+/* Query a flow table and return its SW-owner ICM root addresses
+ * in @icm_addr_0 / @icm_addr_1.
+ */
+int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev,
+                                u32 table_id,
+                                struct mlx5hws_cmd_ft_query_attr *ft_attr,
+                                u64 *icm_addr_0, u64 *icm_addr_1)
+{
+       u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {0};
+       u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {0};
+       void *ft_ctx;
+       int ret;
+
+       MLX5_SET(query_flow_table_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_TABLE);
+       MLX5_SET(query_flow_table_in, in, table_type, ft_attr->type);
+       MLX5_SET(query_flow_table_in, in, table_id, table_id);
+
+       ret = mlx5_cmd_exec_inout(mdev, query_flow_table, in, out);
+       if (ret)
+               return ret;
+
+       ft_ctx = MLX5_ADDR_OF(query_flow_table_out, out, flow_table_context);
+       *icm_addr_0 = MLX5_GET64(flow_table_context, ft_ctx, sws.sw_owner_icm_root_0);
+       *icm_addr_1 = MLX5_GET64(flow_table_context, ft_ctx, sws.sw_owner_icm_root_1);
+
+       return ret;
+}
+
+/* Destroy a flow table of the given FW table type */
+int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
+                                  u8 fw_ft_type, u32 table_id)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
+
+       MLX5_SET(destroy_flow_table_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+       MLX5_SET(destroy_flow_table_in, in, table_type, fw_ft_type);
+       MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
+
+       return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
+}
+
+/* Destroy a flow table alias object (best effort, no status returned) */
+void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
+                                         u32 table_id)
+{
+       hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_FT_ALIAS, table_id);
+}
+
+/* Create an (empty-mask) flow group in the given table.
+ * On success, returns 0 and stores the new group ID in @group_id.
+ */
+static int hws_cmd_flow_group_create(struct mlx5_core_dev *mdev,
+                                    struct mlx5hws_cmd_fg_attr *fg_attr,
+                                    u32 *group_id)
+{
+       u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
+       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+       u32 *in;
+       int ret;
+
+       /* create_flow_group_in is large - allocate rather than use the stack */
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
+       MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type);
+       MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id);
+
+       ret = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
+       if (ret)
+               goto out;
+
+       *group_id = MLX5_GET(create_flow_group_out, out, group_id);
+
+out:
+       kvfree(in);
+       return ret;
+}
+
+/* Destroy a flow group within the given table */
+static int hws_cmd_flow_group_destroy(struct mlx5_core_dev *mdev,
+                                     u32 ft_id, u32 fg_id, u8 ft_type)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
+
+       MLX5_SET(destroy_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+       MLX5_SET(destroy_flow_group_in, in, table_type, ft_type);
+       MLX5_SET(destroy_flow_group_in, in, table_id, ft_id);
+       MLX5_SET(destroy_flow_group_in, in, group_id, fg_id);
+
+       return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
+}
+
+/* Set a flow table entry (FTE) in the given table/group.
+ * Builds the flow context from @fte_attr: action flags, optional
+ * reformat/encrypt parameters, and - when FWD_DEST is set - the
+ * variable-length destination list appended after the fixed part.
+ */
+int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev,
+                       u32 table_type,
+                       u32 table_id,
+                       u32 group_id,
+                       struct mlx5hws_cmd_set_fte_attr *fte_attr)
+{
+       u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
+       void *in_flow_context;
+       u32 dest_entry_sz;
+       u32 total_dest_sz;
+       u32 action_flags;
+       u8 *in_dests;
+       u32 inlen;
+       u32 *in;
+       int ret;
+       u32 i;
+
+       /* Extended destinations use a larger per-entry format */
+       dest_entry_sz = fte_attr->extended_dest ?
+                       MLX5_ST_SZ_BYTES(extended_dest_format) :
+                       MLX5_ST_SZ_BYTES(dest_format);
+       total_dest_sz = dest_entry_sz * fte_attr->dests_num;
+       inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
+       in = kzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+       MLX5_SET(set_fte_in, in, table_type, table_type);
+       MLX5_SET(set_fte_in, in, table_id, table_id);
+
+       in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+       MLX5_SET(flow_context, in_flow_context, group_id, group_id);
+       MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
+       MLX5_SET(flow_context, in_flow_context, extended_destination, fte_attr->extended_dest);
+       MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
+
+       action_flags = fte_attr->action_flags;
+       MLX5_SET(flow_context, in_flow_context, action, action_flags);
+
+       if (action_flags & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+               MLX5_SET(flow_context, in_flow_context,
+                        packet_reformat_id, fte_attr->packet_reformat_id);
+       }
+
+       if (action_flags & (MLX5_FLOW_CONTEXT_ACTION_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_ENCRYPT)) {
+               MLX5_SET(flow_context, in_flow_context,
+                        encrypt_decrypt_type, fte_attr->encrypt_decrypt_type);
+               MLX5_SET(flow_context, in_flow_context,
+                        encrypt_decrypt_obj_id, fte_attr->encrypt_decrypt_obj_id);
+       }
+
+       if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+               in_dests = (u8 *)MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+
+               for (i = 0; i < fte_attr->dests_num; i++) {
+                       struct mlx5hws_cmd_set_fte_dest *dest = &fte_attr->dests[i];
+                       enum mlx5_ifc_flow_destination_type ifc_dest_type =
+                               hws_cmd_dest_type_to_ifc_dest_type(dest->destination_type);
+
+                       switch (dest->destination_type) {
+                       case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+                               /* VPORT may carry the eswitch owner's vhca_id */
+                               if (dest->ext_flags & MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID) {
+                                       MLX5_SET(dest_format, in_dests,
+                                                destination_eswitch_owner_vhca_id_valid, 1);
+                                       MLX5_SET(dest_format, in_dests,
+                                                destination_eswitch_owner_vhca_id,
+                                                dest->esw_owner_vhca_id);
+                               }
+                               fallthrough;
+                       case MLX5_FLOW_DESTINATION_TYPE_TIR:
+                       case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+                               MLX5_SET(dest_format, in_dests, destination_type, ifc_dest_type);
+                               MLX5_SET(dest_format, in_dests, destination_id,
+                                        dest->destination_id);
+                               if (dest->ext_flags & MLX5HWS_CMD_EXT_DEST_REFORMAT) {
+                                       MLX5_SET(dest_format, in_dests, packet_reformat, 1);
+                                       MLX5_SET(extended_dest_format, in_dests, packet_reformat_id,
+                                                dest->ext_reformat_id);
+                               }
+                               break;
+                       default:
+                               ret = -EOPNOTSUPP;
+                               goto out;
+                       }
+
+                       in_dests = in_dests + dest_entry_sz;
+               }
+               MLX5_SET(flow_context, in_flow_context, destination_list_size, fte_attr->dests_num);
+       }
+
+       ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+       if (ret)
+               mlx5_core_err(mdev, "Failed creating FLOW_TABLE_ENTRY\n");
+
+out:
+       kfree(in);
+       return ret;
+}
+
+/* Delete the flow table entry from the given table */
+int mlx5hws_cmd_delete_fte(struct mlx5_core_dev *mdev,
+                          u32 table_type,
+                          u32 table_id)
+{
+       u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
+
+       MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+       MLX5_SET(delete_fte_in, in, table_type, table_type);
+       MLX5_SET(delete_fte_in, in, table_id, table_id);
+
+       return mlx5_cmd_exec_in(mdev, delete_fte, in);
+}
+
+/* Create a "forward table": a flow table holding a single flow group
+ * with a single FTE, used e.g. as a default-miss target.
+ * Returns the created table wrapper, or NULL on any failure
+ * (partially created objects are rolled back).
+ */
+struct mlx5hws_cmd_forward_tbl *
+mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev,
+                              struct mlx5hws_cmd_ft_create_attr *ft_attr,
+                              struct mlx5hws_cmd_set_fte_attr *fte_attr)
+{
+       struct mlx5hws_cmd_fg_attr fg_attr = {0};
+       struct mlx5hws_cmd_forward_tbl *tbl;
+       int ret;
+
+       tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+       if (!tbl)
+               return NULL;
+
+       ret = mlx5hws_cmd_flow_table_create(mdev, ft_attr, &tbl->ft_id);
+       if (ret) {
+               mlx5_core_err(mdev, "Failed to create FT\n");
+               goto free_tbl;
+       }
+
+       fg_attr.table_id = tbl->ft_id;
+       fg_attr.table_type = ft_attr->type;
+
+       ret = hws_cmd_flow_group_create(mdev, &fg_attr, &tbl->fg_id);
+       if (ret) {
+               mlx5_core_err(mdev, "Failed to create FG\n");
+               goto free_ft;
+       }
+
+       ret = mlx5hws_cmd_set_fte(mdev, ft_attr->type,
+                                 tbl->ft_id, tbl->fg_id, fte_attr);
+       if (ret) {
+               mlx5_core_err(mdev, "Failed to create FTE\n");
+               goto free_fg;
+       }
+
+       /* Remember the type for the matching destroy */
+       tbl->type = ft_attr->type;
+       return tbl;
+
+free_fg:
+       hws_cmd_flow_group_destroy(mdev, tbl->ft_id, tbl->fg_id, ft_attr->type);
+free_ft:
+       mlx5hws_cmd_flow_table_destroy(mdev, ft_attr->type, tbl->ft_id);
+free_tbl:
+       kfree(tbl);
+       return NULL;
+}
+
+/* Destroy a forward table: FTE, then group, then table (reverse of create) */
+void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev,
+                                    struct mlx5hws_cmd_forward_tbl *tbl)
+{
+       mlx5hws_cmd_delete_fte(mdev, tbl->type, tbl->ft_id);
+       hws_cmd_flow_group_destroy(mdev, tbl->ft_id, tbl->fg_id, tbl->type);
+       mlx5hws_cmd_flow_table_destroy(mdev, tbl->type, tbl->ft_id);
+       kfree(tbl);
+}
+
+/* Fill @ft_attr so that a table's miss action forwards to the context's
+ * default-miss table. Only applicable to FDB tables; no-op otherwise.
+ */
+void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
+                                          u32 fw_ft_type,
+                                          enum mlx5hws_table_type type,
+                                          struct mlx5hws_cmd_ft_modify_attr *ft_attr)
+{
+       u32 default_miss_tbl;
+
+       if (type != MLX5HWS_TABLE_TYPE_FDB)
+               return;
+
+       ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
+       ft_attr->type = fw_ft_type;
+       ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
+
+       default_miss_tbl = ctx->common_res[type].default_miss->ft_id;
+       if (!default_miss_tbl) {
+               pr_warn("HWS: no flow table ID for default miss\n");
+               return;
+       }
+
+       ft_attr->table_miss_id = default_miss_tbl;
+}
+
+/* Create an RTC (Rule Table Context) general object with the given
+ * attributes. On success, returns 0 and stores the object ID in @rtc_id.
+ */
+int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
+                          struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
+                          u32 *rtc_id)
+{
+       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+       u32 in[MLX5_ST_SZ_DW(create_rtc_in)] = {0};
+       void *attr;
+       int ret;
+
+       attr = MLX5_ADDR_OF(create_rtc_in, in, hdr);
+       MLX5_SET(general_obj_in_cmd_hdr,
+                attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr,
+                attr, obj_type, MLX5_OBJ_TYPE_RTC);
+
+       attr = MLX5_ADDR_OF(create_rtc_in, in, rtc);
+       /* Jumbo STEs take the larger 11-DW format */
+       MLX5_SET(rtc, attr, ste_format_0, rtc_attr->is_frst_jumbo ?
+                MLX5_IFC_RTC_STE_FORMAT_11DW :
+                MLX5_IFC_RTC_STE_FORMAT_8DW);
+
+       if (rtc_attr->is_scnd_range) {
+               MLX5_SET(rtc, attr, ste_format_1, MLX5_IFC_RTC_STE_FORMAT_RANGE);
+               MLX5_SET(rtc, attr, num_match_ste, 2);
+       }
+
+       MLX5_SET(rtc, attr, pd, rtc_attr->pd);
+       MLX5_SET(rtc, attr, update_method, rtc_attr->fw_gen_wqe);
+       MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode);
+       MLX5_SET(rtc, attr, access_index_mode, rtc_attr->access_index_mode);
+       MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
+       MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth);
+       MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size);
+       MLX5_SET(rtc, attr, table_type, rtc_attr->table_type);
+       MLX5_SET(rtc, attr, match_definer_0, rtc_attr->match_definer_0);
+       MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1);
+       MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base);
+       MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base);
+       MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset);
+       MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id);
+       MLX5_SET(rtc, attr, reparse_mode, rtc_attr->reparse_mode);
+
+       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+       if (ret) {
+               mlx5_core_err(mdev, "Failed to create RTC\n");
+               goto out;
+       }
+
+       *rtc_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+       return ret;
+}
+
+/* Destroy an RTC general object (best effort, no status returned) */
+void mlx5hws_cmd_rtc_destroy(struct mlx5_core_dev *mdev, u32 rtc_id)
+{
+       hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_RTC, rtc_id);
+}
+
+/* Create a range of STC (Steering Table Context) general objects.
+ * log_obj_range selects how many objects the range covers; on success,
+ * returns 0 and stores the base object ID in @stc_id.
+ */
+int mlx5hws_cmd_stc_create(struct mlx5_core_dev *mdev,
+                          struct mlx5hws_cmd_stc_create_attr *stc_attr,
+                          u32 *stc_id)
+{
+       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+       u32 in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
+       void *attr;
+       int ret;
+
+       attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
+       MLX5_SET(general_obj_in_cmd_hdr,
+                attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr,
+                attr, obj_type, MLX5_OBJ_TYPE_STC);
+       MLX5_SET(general_obj_in_cmd_hdr,
+                attr, op_param.create.log_obj_range, stc_attr->log_obj_range);
+
+       attr = MLX5_ADDR_OF(create_stc_in, in, stc);
+       MLX5_SET(stc, attr, table_type, stc_attr->table_type);
+
+       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+       if (ret) {
+               mlx5_core_err(mdev, "Failed to create STC\n");
+               goto out;
+       }
+
+       *stc_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+       return ret;
+}
+
+/* Destroy an STC general object previously created with mlx5hws_cmd_stc_create() */
+void mlx5hws_cmd_stc_destroy(struct mlx5_core_dev *mdev, u32 stc_id)
+{
+       hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_STC, stc_id);
+}
+
+/* Fill the per-action-type STC parameter area (@stc_param) according to
+ * stc_attr->action_type. Each case writes only the fields that the given
+ * STE action consumes; action types with no parameters fall through empty.
+ * Returns 0 on success, -EINVAL for an unrecognized action type.
+ */
+static int
+hws_cmd_stc_modify_set_stc_param(struct mlx5_core_dev *mdev,
+                                struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+                                void *stc_param)
+{
+       switch (stc_attr->action_type) {
+       case MLX5_IFC_STC_ACTION_TYPE_COUNTER:
+               MLX5_SET(stc_ste_param_flow_counter, stc_param, flow_counter_id, stc_attr->id);
+               break;
+       case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR:
+               MLX5_SET(stc_ste_param_tir, stc_param, tirn, stc_attr->dest_tir_num);
+               break;
+       case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT:
+               MLX5_SET(stc_ste_param_table, stc_param, table_id, stc_attr->dest_table_id);
+               break;
+       case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST:
+               /* Accelerated modify-header: references a pre-created pattern
+                * object and an argument object holding the action data.
+                */
+               MLX5_SET(stc_ste_param_header_modify_list, stc_param,
+                        header_modify_pattern_id, stc_attr->modify_header.pattern_id);
+               MLX5_SET(stc_ste_param_header_modify_list, stc_param,
+                        header_modify_argument_id, stc_attr->modify_header.arg_id);
+               break;
+       case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE:
+               MLX5_SET(stc_ste_param_remove, stc_param, action_type,
+                        MLX5_MODIFICATION_TYPE_REMOVE);
+               MLX5_SET(stc_ste_param_remove, stc_param, decap,
+                        stc_attr->remove_header.decap);
+               MLX5_SET(stc_ste_param_remove, stc_param, remove_start_anchor,
+                        stc_attr->remove_header.start_anchor);
+               MLX5_SET(stc_ste_param_remove, stc_param, remove_end_anchor,
+                        stc_attr->remove_header.end_anchor);
+               break;
+       case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT:
+               MLX5_SET(stc_ste_param_insert, stc_param, action_type,
+                        MLX5_MODIFICATION_TYPE_INSERT);
+               MLX5_SET(stc_ste_param_insert, stc_param, encap,
+                        stc_attr->insert_header.encap);
+               MLX5_SET(stc_ste_param_insert, stc_param, inline_data,
+                        stc_attr->insert_header.is_inline);
+               MLX5_SET(stc_ste_param_insert, stc_param, insert_anchor,
+                        stc_attr->insert_header.insert_anchor);
+               /* HW gets the next 2 sizes in words */
+               MLX5_SET(stc_ste_param_insert, stc_param, insert_size,
+                        stc_attr->insert_header.header_size / W_SIZE);
+               MLX5_SET(stc_ste_param_insert, stc_param, insert_offset,
+                        stc_attr->insert_header.insert_offset / W_SIZE);
+               MLX5_SET(stc_ste_param_insert, stc_param, insert_argument,
+                        stc_attr->insert_header.arg_id);
+               break;
+       case MLX5_IFC_STC_ACTION_TYPE_COPY:
+       case MLX5_IFC_STC_ACTION_TYPE_SET:
+       case MLX5_IFC_STC_ACTION_TYPE_ADD:
+       case MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD:
+               /* Single modify-field action: the raw big-endian 64-bit
+                * action data is written directly into the parameter area.
+                */
+               *(__be64 *)stc_param = stc_attr->modify_action.data;
+               break;
+       case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
+       case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK:
+               MLX5_SET(stc_ste_param_vport, stc_param, vport_number,
+                        stc_attr->vport.vport_num);
+               MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id,
+                        stc_attr->vport.esw_owner_vhca_id);
+               MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id_valid,
+                        stc_attr->vport.eswitch_owner_vhca_id_valid);
+               break;
+       case MLX5_IFC_STC_ACTION_TYPE_DROP:
+       case MLX5_IFC_STC_ACTION_TYPE_NOP:
+       case MLX5_IFC_STC_ACTION_TYPE_TAG:
+       case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
+               /* These action types take no parameters */
+               break;
+       case MLX5_IFC_STC_ACTION_TYPE_ASO:
+               MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_object_id,
+                        stc_attr->aso.devx_obj_id);
+               MLX5_SET(stc_ste_param_execute_aso, stc_param, return_reg_id,
+                        stc_attr->aso.return_reg_id);
+               MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_type,
+                        stc_attr->aso.aso_type);
+               break;
+       case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
+               MLX5_SET(stc_ste_param_ste_table, stc_param, ste_obj_id,
+                        stc_attr->ste_table.ste_obj_id);
+               MLX5_SET(stc_ste_param_ste_table, stc_param, match_definer_id,
+                        stc_attr->ste_table.match_definer_id);
+               MLX5_SET(stc_ste_param_ste_table, stc_param, log_hash_size,
+                        stc_attr->ste_table.log_hash_size);
+               break;
+       case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS:
+               MLX5_SET(stc_ste_param_remove_words, stc_param, action_type,
+                        MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
+               MLX5_SET(stc_ste_param_remove_words, stc_param, remove_start_anchor,
+                        stc_attr->remove_words.start_anchor);
+               MLX5_SET(stc_ste_param_remove_words, stc_param,
+                        remove_size, stc_attr->remove_words.num_of_words);
+               break;
+       case MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_ENCRYPTION:
+               MLX5_SET(stc_ste_param_ipsec_encrypt, stc_param, ipsec_object_id,
+                        stc_attr->id);
+               break;
+       case MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_DECRYPTION:
+               MLX5_SET(stc_ste_param_ipsec_decrypt, stc_param, ipsec_object_id,
+                        stc_attr->id);
+               break;
+       case MLX5_IFC_STC_ACTION_TYPE_TRAILER:
+               MLX5_SET(stc_ste_param_trailer, stc_param, command,
+                        stc_attr->reformat_trailer.op);
+               MLX5_SET(stc_ste_param_trailer, stc_param, type,
+                        stc_attr->reformat_trailer.type);
+               MLX5_SET(stc_ste_param_trailer, stc_param, length,
+                        stc_attr->reformat_trailer.size);
+               break;
+       default:
+               mlx5_core_err(mdev, "Not supported type %d\n", stc_attr->action_type);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+/* Modify one STC within a previously created range (MODIFY_GENERAL_OBJECT).
+ * @stc_id selects the range's base object; stc_attr->stc_offset selects the
+ * individual STC inside it. The action-type-specific parameters are filled
+ * by hws_cmd_stc_modify_set_stc_param().
+ * Returns 0 on success or a negative errno.
+ */
+int mlx5hws_cmd_stc_modify(struct mlx5_core_dev *mdev,
+                          u32 stc_id,
+                          struct mlx5hws_cmd_stc_modify_attr *stc_attr)
+{
+       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+       u32 in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
+       void *stc_param;
+       void *attr;
+       int ret;
+
+       attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
+       MLX5_SET(general_obj_in_cmd_hdr,
+                attr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr,
+                attr, obj_type, MLX5_OBJ_TYPE_STC);
+       /* NOTE(review): obj_id/obj_offset are written via 'in' while
+        * opcode/obj_type use 'attr'; the two agree only if 'hdr' sits at
+        * offset 0 of create_stc_in - confirm against mlx5_ifc layout.
+        */
+       MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, stc_id);
+       MLX5_SET(general_obj_in_cmd_hdr, in,
+                op_param.query.obj_offset, stc_attr->stc_offset);
+
+       attr = MLX5_ADDR_OF(create_stc_in, in, stc);
+       MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset);
+       MLX5_SET(stc, attr, action_type, stc_attr->action_type);
+       MLX5_SET(stc, attr, reparse_mode, stc_attr->reparse_mode);
+       MLX5_SET64(stc, attr, modify_field_select,
+                  MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC);
+
+       /* Set destination TIRN, TAG, FT ID, STE ID */
+       stc_param = MLX5_ADDR_OF(stc, attr, stc_param);
+       ret = hws_cmd_stc_modify_set_stc_param(mdev, stc_attr, stc_param);
+       if (ret)
+               return ret;
+
+       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+       if (ret)
+               mlx5_core_err(mdev, "Failed to modify STC FW action_type %d\n",
+                             stc_attr->action_type);
+
+       return ret;
+}
+
+/* Create a range of modify-header argument objects bound to protection
+ * domain @pd. The range size is 2^log_obj_range; the base object id is
+ * returned through @arg_id. Returns 0 on success or a negative errno.
+ */
+int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
+                          u16 log_obj_range,
+                          u32 pd,
+                          u32 *arg_id)
+{
+       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+       u32 in[MLX5_ST_SZ_DW(create_arg_in)] = {0};
+       void *attr;
+       int ret;
+
+       attr = MLX5_ADDR_OF(create_arg_in, in, hdr);
+       MLX5_SET(general_obj_in_cmd_hdr,
+                attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr,
+                attr, obj_type, MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
+       MLX5_SET(general_obj_in_cmd_hdr,
+                attr, op_param.create.log_obj_range, log_obj_range);
+
+       attr = MLX5_ADDR_OF(create_arg_in, in, arg);
+       MLX5_SET(arg, attr, access_pd, pd);
+
+       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+       if (ret) {
+               mlx5_core_err(mdev, "Failed to create ARG\n");
+               goto out;
+       }
+
+       *arg_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+       return ret;
+}
+
+/* Destroy a modify-header argument object created with mlx5hws_cmd_arg_create() */
+void mlx5hws_cmd_arg_destroy(struct mlx5_core_dev *mdev,
+                            u32 arg_id)
+{
+       hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT, arg_id);
+}
+
+/* Create a modify-header pattern object from @actions (@pattern_length
+ * bytes of modify actions). The data field of each action is zeroed before
+ * sending, except for COPY/ADD_FIELD actions, since a pattern describes
+ * only the control part of the actions; per-rule data is supplied through
+ * argument objects. The new object id is returned through @ptrn_id.
+ * Returns -EINVAL if @pattern_length exceeds the FW limit, otherwise the
+ * mlx5_cmd_exec() result.
+ */
+int mlx5hws_cmd_header_modify_pattern_create(struct mlx5_core_dev *mdev,
+                                            u32 pattern_length,
+                                            u8 *actions,
+                                            u32 *ptrn_id)
+{
+       u32 in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0};
+       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+       int num_of_actions;
+       u64 *pattern_data;
+       void *pattern;
+       void *attr;
+       int ret;
+       int i;
+
+       if (pattern_length > MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY) {
+               mlx5_core_err(mdev, "Pattern length %d exceeds limit %d\n",
+                             pattern_length, MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY);
+               return -EINVAL;
+       }
+
+       attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr);
+       MLX5_SET(general_obj_in_cmd_hdr,
+                attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr,
+                attr, obj_type, MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN);
+
+       pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern);
+       /* Pattern_length is in ddwords */
+       MLX5_SET(header_modify_pattern_in, pattern, pattern_length, pattern_length / (2 * DW_SIZE));
+
+       pattern_data = (u64 *)MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data);
+       memcpy(pattern_data, actions, pattern_length);
+
+       num_of_actions = pattern_length / MLX5HWS_MODIFY_ACTION_SIZE;
+       for (i = 0; i < num_of_actions; i++) {
+               int type;
+
+               type = MLX5_GET(set_action_in, &pattern_data[i], action_type);
+               if (type != MLX5_MODIFICATION_TYPE_COPY &&
+                   type != MLX5_MODIFICATION_TYPE_ADD_FIELD)
+                       /* Action type copy uses all bytes for control */
+                       MLX5_SET(set_action_in, &pattern_data[i], data, 0);
+       }
+
+       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+       if (ret) {
+               mlx5_core_err(mdev, "Failed to create header_modify_pattern\n");
+               goto out;
+       }
+
+       *ptrn_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+       return ret;
+}
+
+/* Destroy a modify-header pattern object created with
+ * mlx5hws_cmd_header_modify_pattern_create()
+ */
+void mlx5hws_cmd_header_modify_pattern_destroy(struct mlx5_core_dev *mdev,
+                                              u32 ptrn_id)
+{
+       hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN, ptrn_id);
+}
+
+/* Create a range of STE (Steering Table Entry) objects. The range size is
+ * 2^ste_attr->log_obj_range; the base object id is returned through
+ * @ste_id. Returns 0 on success or a negative errno.
+ */
+int mlx5hws_cmd_ste_create(struct mlx5_core_dev *mdev,
+                          struct mlx5hws_cmd_ste_create_attr *ste_attr,
+                          u32 *ste_id)
+{
+       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+       u32 in[MLX5_ST_SZ_DW(create_ste_in)] = {0};
+       void *attr;
+       int ret;
+
+       attr = MLX5_ADDR_OF(create_ste_in, in, hdr);
+       MLX5_SET(general_obj_in_cmd_hdr,
+                attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr,
+                attr, obj_type, MLX5_OBJ_TYPE_STE);
+       MLX5_SET(general_obj_in_cmd_hdr,
+                attr, op_param.create.log_obj_range, ste_attr->log_obj_range);
+
+       attr = MLX5_ADDR_OF(create_ste_in, in, ste);
+       MLX5_SET(ste, attr, table_type, ste_attr->table_type);
+
+       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+       if (ret) {
+               mlx5_core_err(mdev, "Failed to create STE\n");
+               goto out;
+       }
+
+       *ste_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+       return ret;
+}
+
+/* Destroy an STE general object previously created with mlx5hws_cmd_ste_create() */
+void mlx5hws_cmd_ste_destroy(struct mlx5_core_dev *mdev, u32 ste_id)
+{
+       hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_STE, ste_id);
+}
+
+/* Create a SELECT-format match definer object from the given DW/byte
+ * selectors and match mask. The new definer object id is returned through
+ * @definer_id. Returns 0 on success or a negative errno.
+ */
+int mlx5hws_cmd_definer_create(struct mlx5_core_dev *mdev,
+                              struct mlx5hws_cmd_definer_create_attr *def_attr,
+                              u32 *definer_id)
+{
+       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+       u32 in[MLX5_ST_SZ_DW(create_definer_in)] = {0};
+       void *ptr;
+       int ret;
+
+       MLX5_SET(general_obj_in_cmd_hdr,
+                in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr,
+                in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER);
+
+       ptr = MLX5_ADDR_OF(create_definer_in, in, definer);
+       MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT);
+
+       /* Nine DW selectors choose which packet dwords feed the match key */
+       MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]);
+       MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]);
+       MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]);
+       MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]);
+       MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]);
+       MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]);
+       MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]);
+       MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]);
+       MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]);
+
+       /* Eight byte selectors choose individual bytes for the match key */
+       MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]);
+       MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]);
+       MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]);
+       MLX5_SET(definer, ptr, format_select_byte3, def_attr->byte_selector[3]);
+       MLX5_SET(definer, ptr, format_select_byte4, def_attr->byte_selector[4]);
+       MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]);
+       MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]);
+       MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]);
+
+       ptr = MLX5_ADDR_OF(definer, ptr, match_mask);
+       memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask));
+
+       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+       if (ret) {
+               mlx5_core_err(mdev, "Failed to create Definer\n");
+               goto out;
+       }
+
+       *definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+       return ret;
+}
+
+/* Destroy a match definer object created with mlx5hws_cmd_definer_create() */
+void mlx5hws_cmd_definer_destroy(struct mlx5_core_dev *mdev,
+                                u32 definer_id)
+{
+       hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_MATCH_DEFINER, definer_id);
+}
+
+/* Allocate a packet reformat context (encap/decap data) in FW.
+ * The input command is heap-allocated because the reformat data is
+ * variable length: the fixed layout size is adjusted by attr->data_sz and
+ * aligned to DW_SIZE. On success the FW context id is returned through
+ * @reformat_id. Returns 0 on success, -ENOMEM, or mlx5_cmd_exec() error.
+ */
+int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
+                                      struct mlx5hws_cmd_packet_reformat_create_attr *attr,
+                                      u32 *reformat_id)
+{
+       u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0};
+       size_t insz, cmd_data_sz, cmd_total_sz;
+       void *prctx;
+       void *pdata;
+       void *in;
+       int ret;
+
+       cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
+       cmd_total_sz += MLX5_ST_SZ_BYTES(packet_reformat_context_in);
+       cmd_data_sz = MLX5_FLD_SZ_BYTES(packet_reformat_context_in, reformat_data);
+       insz = align(cmd_total_sz + attr->data_sz - cmd_data_sz, DW_SIZE);
+       in = kzalloc(insz, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
+                MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
+
+       prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
+                            packet_reformat_context);
+       pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
+
+       MLX5_SET(packet_reformat_context_in, prctx, reformat_type, attr->type);
+       MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, attr->reformat_param_0);
+       MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, attr->data_sz);
+       memcpy(pdata, attr->data, attr->data_sz);
+
+       ret = mlx5_cmd_exec(mdev, in, insz, out, sizeof(out));
+       if (ret) {
+               mlx5_core_err(mdev, "Failed to create packet reformat\n");
+               goto out;
+       }
+
+       *reformat_id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id);
+out:
+       /* Command buffer is freed on both success and error paths */
+       kfree(in);
+       return ret;
+}
+
+/* Deallocate a packet reformat context previously created with
+ * mlx5hws_cmd_packet_reformat_create(). Returns the mlx5_cmd_exec() result.
+ */
+int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev,
+                                       u32 reformat_id)
+{
+       u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_out)] = {0};
+       u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_in)] = {0};
+       int ret;
+
+       MLX5_SET(dealloc_packet_reformat_in, in, opcode,
+                MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
+       MLX5_SET(dealloc_packet_reformat_in, in,
+                packet_reformat_id, reformat_id);
+
+       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+       if (ret)
+               mlx5_core_err(mdev, "Failed to destroy packet_reformat\n");
+
+       return ret;
+}
+
+/* Transition SQ @sqn from RST to RDY state via MODIFY_SQ.
+ * Returns the mlx5_cmd_exec() result.
+ */
+int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn)
+{
+       u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
+       u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
+       void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+       int ret;
+
+       MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
+       MLX5_SET(modify_sq_in, in, sqn, sqn);
+       /* sq_state is the current state; the context carries the new state */
+       MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+       MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+       if (ret)
+               mlx5_core_err(mdev, "Failed to modify SQ\n");
+
+       return ret;
+}
+
+/* Grant other vHCAs access to a local object (attr->obj_type/obj_id),
+ * guarded by attr->access_key. A remote function can then create an alias
+ * object referencing it (see mlx5hws_cmd_alias_obj_create()).
+ * Returns the mlx5_cmd_exec() result.
+ */
+int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
+                                       struct mlx5hws_cmd_allow_other_vhca_access_attr *attr)
+{
+       u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0};
+       u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0};
+       void *key;
+       int ret;
+
+       MLX5_SET(allow_other_vhca_access_in,
+                in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
+       MLX5_SET(allow_other_vhca_access_in,
+                in, object_type_to_be_accessed, attr->obj_type);
+       MLX5_SET(allow_other_vhca_access_in,
+                in, object_id_to_be_accessed, attr->obj_id);
+
+       key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
+       memcpy(key, attr->access_key, sizeof(attr->access_key));
+
+       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+       if (ret)
+               mlx5_core_err(mdev, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command\n");
+
+       return ret;
+}
+
+/* Create an alias object that refers to an object owned by another vHCA
+ * (alias_attr->vhca_id / obj_id), authenticated by the access key the
+ * owner registered via ALLOW_OTHER_VHCA_ACCESS. The local alias object id
+ * is returned through @obj_id. Returns 0 on success or a negative errno.
+ */
+int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
+                                struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
+                                u32 *obj_id)
+{
+       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+       u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0};
+       void *attr;
+       void *key;
+       int ret;
+
+       attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
+       MLX5_SET(general_obj_in_cmd_hdr,
+                attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr,
+                attr, obj_type, alias_attr->obj_type);
+       MLX5_SET(general_obj_in_cmd_hdr, attr, op_param.create.alias_object, 1);
+
+       attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
+       MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
+       MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);
+
+       key = MLX5_ADDR_OF(alias_context, attr, access_key);
+       memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));
+
+       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+       if (ret) {
+               mlx5_core_err(mdev, "Failed to create ALIAS OBJ\n");
+               goto out;
+       }
+
+       *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+       return ret;
+}
+
+/* Destroy an alias object of the given @obj_type created with
+ * mlx5hws_cmd_alias_obj_create()
+ */
+int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
+                                 u16 obj_type,
+                                 u32 obj_id)
+{
+       return hws_cmd_general_obj_destroy(mdev, obj_type, obj_id);
+}
+
+/* Ask FW to execute a GTA WQE on behalf of the driver (GENERATE_WQE) and
+ * return the resulting completion through @ret_cqe. The second GTA data
+ * segment (attr->gta_data_1) is optional and copied only when non-NULL.
+ * Returns 0 on success, a negative errno on command failure, or -EINVAL
+ * when the command completed but the returned CQE status is non-zero.
+ */
+int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev,
+                            struct mlx5hws_cmd_generate_wqe_attr *attr,
+                            struct mlx5_cqe64 *ret_cqe)
+{
+       u32 out[MLX5_ST_SZ_DW(generate_wqe_out)] = {0};
+       u32 in[MLX5_ST_SZ_DW(generate_wqe_in)] = {0};
+       u8 status;
+       void *ptr;
+       int ret;
+
+       MLX5_SET(generate_wqe_in, in, opcode, MLX5_CMD_OP_GENERATE_WQE);
+       MLX5_SET(generate_wqe_in, in, pdn, attr->pdn);
+
+       ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_ctrl);
+       memcpy(ptr, attr->wqe_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_ctrl));
+
+       ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_ctrl);
+       memcpy(ptr, attr->gta_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_ctrl));
+
+       ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_0);
+       memcpy(ptr, attr->gta_data_0, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_0));
+
+       if (attr->gta_data_1) {
+               ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_1);
+               memcpy(ptr, attr->gta_data_1, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_1));
+       }
+
+       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+       if (ret) {
+               mlx5_core_err(mdev, "Failed to write GTA WQE using FW\n");
+               return ret;
+       }
+
+       status = MLX5_GET(generate_wqe_out, out, status);
+       if (status) {
+               mlx5_core_err(mdev, "Invalid FW CQE status %d\n", status);
+               return -EINVAL;
+       }
+
+       /* Copy the FW-produced CQE back to the caller */
+       ptr = MLX5_ADDR_OF(generate_wqe_out, out, cqe_data);
+       memcpy(ret_cqe, ptr, sizeof(*ret_cqe));
+
+       return ret;
+}
+
+int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
+                          struct mlx5hws_cmd_query_caps *caps)
+{
+       u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
+       u32 out_size;
+       u32 *out;
+       int ret;
+
+       out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+       out = kzalloc(out_size, GFP_KERNEL);
+       if (!out)
+               return -ENOMEM;
+
+       MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+       MLX5_SET(query_hca_cap_in, in, op_mod,
+                MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE | HCA_CAP_OPMOD_GET_CUR);
+
+       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+       if (ret) {
+               mlx5_core_err(mdev, "Failed to query device caps\n");
+               goto out;
+       }
+
+       caps->wqe_based_update =
+               MLX5_GET(query_hca_cap_out, out,
+                        capability.cmd_hca_cap.wqe_based_flow_table_update_cap);
+
+       caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out,
+                                        capability.cmd_hca_cap.eswitch_manager);
+
+       caps->flex_protocols = MLX5_GET(query_hca_cap_out, out,
+                                       capability.cmd_hca_cap.flex_parser_protocols);
+
+       if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
+               caps->flex_parser_id_geneve_tlv_option_0 =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.cmd_hca_cap.flex_parser_id_geneve_tlv_option_0);
+
+       if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
+               caps->flex_parser_id_mpls_over_gre =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.cmd_hca_cap.flex_parser_id_outer_first_mpls_over_gre);
+
+       if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
+               caps->flex_parser_id_mpls_over_udp =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.cmd_hca_cap.flex_parser_id_outer_first_mpls_over_udp_label);
+
+       caps->log_header_modify_argument_granularity =
+               MLX5_GET(query_hca_cap_out, out,
+                        capability.cmd_hca_cap.log_header_modify_argument_granularity);
+
+       caps->log_header_modify_argument_granularity -=
+               MLX5_GET(query_hca_cap_out, out,
+                        capability.cmd_hca_cap.log_header_modify_argument_granularity_offset);
+
+       caps->log_header_modify_argument_max_alloc =
+               MLX5_GET(query_hca_cap_out, out,
+                        capability.cmd_hca_cap.log_header_modify_argument_max_alloc);
+
+       caps->definer_format_sup =
+               MLX5_GET64(query_hca_cap_out, out,
+                          capability.cmd_hca_cap.match_definer_format_supported);
+
+       caps->vhca_id = MLX5_GET(query_hca_cap_out, out,
+                                capability.cmd_hca_cap.vhca_id);
+
+       caps->sq_ts_format = MLX5_GET(query_hca_cap_out, out,
+                                     capability.cmd_hca_cap.sq_ts_format);
+
+       caps->ipsec_offload = MLX5_GET(query_hca_cap_out, out,
+                                      capability.cmd_hca_cap.ipsec_offload);
+
+       MLX5_SET(query_hca_cap_in, in, op_mod,
+                MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 | HCA_CAP_OPMOD_GET_CUR);
+
+       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+       if (ret) {
+               mlx5_core_err(mdev, "Failed to query device caps 2\n");
+               goto out;
+       }
+
+       caps->full_dw_jumbo_support =
+               MLX5_GET(query_hca_cap_out, out,
+                        capability.cmd_hca_cap_2.format_select_dw_8_6_ext);
+
+       caps->format_select_gtpu_dw_0 =
+               MLX5_GET(query_hca_cap_out, out,
+                        capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_0);
+
+       caps->format_select_gtpu_dw_1 =
+               MLX5_GET(query_hca_cap_out, out,
+                        capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_1);
+
+       caps->format_select_gtpu_dw_2 =
+               MLX5_GET(query_hca_cap_out, out,
+                        capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_2);
+
+       caps->format_select_gtpu_ext_dw_0 =
+               MLX5_GET(query_hca_cap_out, out,
+                        capability.cmd_hca_cap_2.format_select_dw_gtpu_first_ext_dw_0);
+
+       caps->supp_type_gen_wqe =
+               MLX5_GET(query_hca_cap_out, out,
+                        capability.cmd_hca_cap_2.generate_wqe_type);
+
+       caps->flow_table_hash_type =
+               MLX5_GET(query_hca_cap_out, out,
+                        capability.cmd_hca_cap_2.flow_table_hash_type);
+
+       MLX5_SET(query_hca_cap_in, in, op_mod,
+                MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
+
+       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+       if (ret) {
+               mlx5_core_err(mdev, "Failed to query flow table caps\n");
+               goto out;
+       }
+
+       caps->nic_ft.max_level =
+               MLX5_GET(query_hca_cap_out, out,
+                        capability.flow_table_nic_cap.flow_table_properties_nic_receive.max_ft_level);
+
+       caps->nic_ft.reparse =
+               MLX5_GET(query_hca_cap_out, out,
+                        capability.flow_table_nic_cap.flow_table_properties_nic_receive.reparse);
+
+       caps->nic_ft.ignore_flow_level_rtc_valid =
+               MLX5_GET(query_hca_cap_out, out,
+                        capability.flow_table_nic_cap.flow_table_properties_nic_receive.ignore_flow_level_rtc_valid);
+
+       caps->flex_parser_ok_bits_supp =
+               MLX5_GET(query_hca_cap_out, out,
+                        capability.flow_table_nic_cap.flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist);
+
+       if (caps->wqe_based_update) {
+               MLX5_SET(query_hca_cap_in, in, op_mod,
+                        MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
+
+               ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+               if (ret) {
+                       mlx5_core_err(mdev, "Failed to query WQE based FT caps\n");
+                       goto out;
+               }
+
+               caps->rtc_reparse_mode =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.wqe_based_flow_table_cap.rtc_reparse_mode);
+
+               caps->ste_format =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.wqe_based_flow_table_cap.ste_format);
+
+               caps->rtc_index_mode =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.wqe_based_flow_table_cap.rtc_index_mode);
+
+               caps->rtc_log_depth_max =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.wqe_based_flow_table_cap.rtc_log_depth_max);
+
+               caps->ste_alloc_log_max =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.wqe_based_flow_table_cap.ste_alloc_log_max);
+
+               caps->ste_alloc_log_gran =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.wqe_based_flow_table_cap.ste_alloc_log_granularity);
+
+               caps->trivial_match_definer =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.wqe_based_flow_table_cap.trivial_match_definer);
+
+               caps->stc_alloc_log_max =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.wqe_based_flow_table_cap.stc_alloc_log_max);
+
+               caps->stc_alloc_log_gran =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.wqe_based_flow_table_cap.stc_alloc_log_granularity);
+
+               caps->rtc_hash_split_table =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.wqe_based_flow_table_cap.rtc_hash_split_table);
+
+               caps->rtc_linear_lookup_table =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.wqe_based_flow_table_cap.rtc_linear_lookup_table);
+
+               caps->access_index_mode =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.wqe_based_flow_table_cap.access_index_mode);
+
+               caps->linear_match_definer =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.wqe_based_flow_table_cap.linear_match_definer_reg_c3);
+
+               caps->rtc_max_hash_def_gen_wqe =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.wqe_based_flow_table_cap.rtc_max_num_hash_definer_gen_wqe);
+
+               caps->supp_ste_format_gen_wqe =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.wqe_based_flow_table_cap.ste_format_gen_wqe);
+
+               caps->fdb_tir_stc =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.wqe_based_flow_table_cap.fdb_jump_to_tir_stc);
+       }
+
+       if (caps->eswitch_manager) {
+               MLX5_SET(query_hca_cap_in, in, op_mod,
+                        MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
+
+               ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+               if (ret) {
+                       mlx5_core_err(mdev, "Failed to query flow table esw caps\n");
+                       goto out;
+               }
+
+               caps->fdb_ft.max_level =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.flow_table_nic_cap.flow_table_properties_nic_receive.max_ft_level);
+
+               caps->fdb_ft.reparse =
+                       MLX5_GET(query_hca_cap_out, out,
+                                capability.flow_table_nic_cap.flow_table_properties_nic_receive.reparse);
+
+               MLX5_SET(query_hca_cap_in, in, op_mod,
+                        MLX5_SET_HCA_CAP_OP_MOD_ESW | HCA_CAP_OPMOD_GET_CUR);
+
+               ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+               if (ret) {
+                       mlx5_core_err(mdev, "Failed to query eswitch capabilities\n");
+                       goto out;
+               }
+
+               if (MLX5_GET(query_hca_cap_out, out,
+                            capability.esw_cap.esw_manager_vport_number_valid))
+                       caps->eswitch_manager_vport_number =
+                               MLX5_GET(query_hca_cap_out, out,
+                                        capability.esw_cap.esw_manager_vport_number);
+
+               caps->merged_eswitch = MLX5_GET(query_hca_cap_out, out,
+                                               capability.esw_cap.merged_eswitch);
+       }
+
+       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+       if (ret) {
+               mlx5_core_err(mdev, "Failed to query device attributes\n");
+               goto out;
+       }
+
+       snprintf(caps->fw_ver, sizeof(caps->fw_ver), "%d.%d.%d",
+                fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
+
+       caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);
+
+out:
+       kfree(out);
+       return ret;
+}
+
+/* Query the GVMI (global vHCA ID) of a vport by reading the vhca_id field
+ * from its general device capabilities. When @other_function is set the
+ * query targets the function identified by @vport_number instead of the
+ * local function. Returns 0 on success or a negative errno.
+ */
+int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
+                          u16 vport_number, u16 *gvmi)
+{
+       /* EC-VF vports need their vport number translated to a function id */
+       bool ec_vf_func = other_function ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false;
+       u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
+       int out_size;
+       void *out;
+       int err;
+
+       out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+       out = kzalloc(out_size, GFP_KERNEL);
+       if (!out)
+               return -ENOMEM;
+
+       MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+       MLX5_SET(query_hca_cap_in, in, other_function, other_function);
+       MLX5_SET(query_hca_cap_in, in, function_id,
+                mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func));
+       MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
+       /* op_mod bit 0 selects current (vs. maximum) capability values */
+       MLX5_SET(query_hca_cap_in, in, op_mod,
+                MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 | HCA_CAP_OPMOD_GET_CUR);
+
+       err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+       if (err) {
+               kfree(out);
+               return err;
+       }
+
+       *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
+
+       kfree(out);
+
+       return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
new file mode 100644 (file)
index 0000000..434f62b
--- /dev/null
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_CMD_H_
+#define HWS_CMD_H_
+
+#define WIRE_PORT 0xFFFF
+
+#define ACCESS_KEY_LEN 32
+
+enum mlx5hws_cmd_ext_dest_flags {
+       MLX5HWS_CMD_EXT_DEST_REFORMAT = 1 << 0,
+       MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID = 1 << 1,
+};
+
+struct mlx5hws_cmd_set_fte_dest {
+       u8 destination_type;
+       u32 destination_id;
+       enum mlx5hws_cmd_ext_dest_flags ext_flags;
+       u32 ext_reformat_id;
+       u16 esw_owner_vhca_id;
+};
+
+struct mlx5hws_cmd_set_fte_attr {
+       u32 action_flags;
+       bool ignore_flow_level;
+       u8 flow_source;
+       u8 extended_dest;
+       u8 encrypt_decrypt_type;
+       u32 encrypt_decrypt_obj_id;
+       u32 packet_reformat_id;
+       u32 dests_num;
+       struct mlx5hws_cmd_set_fte_dest *dests;
+};
+
+struct mlx5hws_cmd_ft_create_attr {
+       u8 type;
+       u8 level;
+       bool rtc_valid;
+       bool decap_en;
+       bool reformat_en;
+};
+
+struct mlx5hws_cmd_ft_modify_attr {
+       u8 type;
+       u32 rtc_id_0;
+       u32 rtc_id_1;
+       u32 table_miss_id;
+       u8 table_miss_action;
+       u64 modify_fs;
+};
+
+struct mlx5hws_cmd_ft_query_attr {
+       u8 type;
+};
+
+struct mlx5hws_cmd_fg_attr {
+       u32 table_id;
+       u32 table_type;
+};
+
+/* FW flow table with a single flow group, used as a forward/miss target.
+ * Shared between users via @refcount.
+ */
+struct mlx5hws_cmd_forward_tbl {
+       u8 type;
+       u32 ft_id;
+       u32 fg_id;
+       u32 refcount;
+};
+
+/* Creation parameters for an RTC object.
+ * @update_index_mode / @access_index_mode select by-hash vs. by-offset
+ * rule addressing; @miss_ft_id is the table jumped to on lookup miss.
+ */
+struct mlx5hws_cmd_rtc_create_attr {
+       u32 pd;
+       u32 stc_base;
+       u32 ste_base;
+       u32 ste_offset;
+       u32 miss_ft_id;
+       bool fw_gen_wqe;
+       u8 update_index_mode;
+       u8 access_index_mode;
+       u8 num_hash_definer;
+       u8 log_depth;
+       u8 log_size;
+       u8 table_type;
+       u8 match_definer_0;
+       u8 match_definer_1;
+       u8 reparse_mode;
+       bool is_frst_jumbo;
+       bool is_scnd_range;
+};
+
+struct mlx5hws_cmd_alias_obj_create_attr {
+       u32 obj_id;
+       u16 vhca_id;
+       u16 obj_type;
+       u8 access_key[ACCESS_KEY_LEN];
+};
+
+struct mlx5hws_cmd_stc_create_attr {
+       u8 log_obj_range;
+       u8 table_type;
+};
+
+/* Parameters for modifying an STC entry.
+ * The valid union member is selected by @action_type; @stc_offset
+ * addresses the entry within the STC object.
+ */
+struct mlx5hws_cmd_stc_modify_attr {
+       u32 stc_offset;
+       u8 action_offset;
+       u8 reparse_mode;
+       enum mlx5_ifc_stc_action_type action_type;
+       union {
+               u32 id; /* TIRN, TAG, FT ID, STE ID, CRYPTO */
+               struct {
+                       u8 decap;
+                       u16 start_anchor;
+                       u16 end_anchor;
+               } remove_header;
+               struct {
+                       u32 arg_id;
+                       u32 pattern_id;
+               } modify_header;
+               struct {
+                       __be64 data;
+               } modify_action;
+               struct {
+                       u32 arg_id;
+                       u32 header_size;
+                       u8 is_inline;
+                       u8 encap;
+                       u16 insert_anchor;
+                       u16 insert_offset;
+               } insert_header;
+               struct {
+                       u8 aso_type;
+                       u32 devx_obj_id;
+                       u8 return_reg_id;
+               } aso;
+               struct {
+                       u16 vport_num;
+                       u16 esw_owner_vhca_id;
+                       u8 eswitch_owner_vhca_id_valid;
+               } vport;
+               struct {
+                       struct mlx5hws_pool_chunk ste;
+                       struct mlx5hws_pool *ste_pool;
+                       u32 ste_obj_id; /* Internal */
+                       u32 match_definer_id;
+                       u8 log_hash_size;
+                       bool ignore_tx;
+               } ste_table;
+               struct {
+                       u16 start_anchor;
+                       u16 num_of_words;
+               } remove_words;
+               struct {
+                       u8 type;
+                       u8 op;
+                       u8 size;
+               } reformat_trailer;
+
+               u32 dest_table_id;
+               u32 dest_tir_num;
+       };
+};
+
+struct mlx5hws_cmd_ste_create_attr {
+       u8 log_obj_range;
+       u8 table_type;
+};
+
+struct mlx5hws_cmd_definer_create_attr {
+       u8 *dw_selector;
+       u8 *byte_selector;
+       u8 *match_mask;
+};
+
+struct mlx5hws_cmd_allow_other_vhca_access_attr {
+       u16 obj_type;
+       u32 obj_id;
+       u8 access_key[ACCESS_KEY_LEN];
+};
+
+struct mlx5hws_cmd_packet_reformat_create_attr {
+       u8 type;
+       size_t data_sz;
+       void *data;
+       u8 reformat_param_0;
+};
+
+/* Per-table-type (NIC / FDB) flow table capabilities */
+struct mlx5hws_cmd_query_ft_caps {
+       u8 max_level;
+       u8 reparse;
+       u8 ignore_flow_level_rtc_valid;
+};
+
+struct mlx5hws_cmd_generate_wqe_attr {
+       u8 *wqe_ctrl;
+       u8 *gta_ctrl;
+       u8 *gta_data_0;
+       u8 *gta_data_1;
+       u32 pdn;
+};
+
+/* Snapshot of all device/FW capabilities HWS cares about.
+ * Filled once by mlx5hws_cmd_query_caps() and kept on the context.
+ */
+struct mlx5hws_cmd_query_caps {
+       u32 flex_protocols;
+       u8 wqe_based_update;
+       u8 rtc_reparse_mode;
+       u16 ste_format;
+       u8 rtc_index_mode;
+       u8 ste_alloc_log_max;
+       u8 ste_alloc_log_gran;
+       u8 stc_alloc_log_max;
+       u8 stc_alloc_log_gran;
+       u8 rtc_log_depth_max;
+       u8 format_select_gtpu_dw_0;
+       u8 format_select_gtpu_dw_1;
+       u8 flow_table_hash_type;
+       u8 format_select_gtpu_dw_2;
+       u8 format_select_gtpu_ext_dw_0;
+       u8 access_index_mode;
+       u32 linear_match_definer;
+       bool full_dw_jumbo_support;
+       bool rtc_hash_split_table;
+       bool rtc_linear_lookup_table;
+       u32 supp_type_gen_wqe;
+       u8 rtc_max_hash_def_gen_wqe;
+       u16 supp_ste_format_gen_wqe;
+       struct mlx5hws_cmd_query_ft_caps nic_ft;
+       struct mlx5hws_cmd_query_ft_caps fdb_ft;
+       bool eswitch_manager;
+       bool merged_eswitch;
+       u32 eswitch_manager_vport_number;
+       u8 log_header_modify_argument_granularity;
+       u8 log_header_modify_argument_max_alloc;
+       u8 sq_ts_format;
+       u8 fdb_tir_stc;
+       u64 definer_format_sup;
+       u32 trivial_match_definer;
+       u32 vhca_id;
+       u32 shared_vhca_id;
+       char fw_ver[64]; /* "maj.min.sub", see mlx5hws_cmd_query_caps() */
+       bool ipsec_offload;
+       bool is_ecpf;
+       u8 flex_parser_ok_bits_supp;
+       u8 flex_parser_id_geneve_tlv_option_0;
+       u8 flex_parser_id_mpls_over_gre;
+       u8 flex_parser_id_mpls_over_udp;
+};
+
+int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev,
+                                 struct mlx5hws_cmd_ft_create_attr *ft_attr,
+                                 u32 *table_id);
+
+int mlx5hws_cmd_flow_table_modify(struct mlx5_core_dev *mdev,
+                                 struct mlx5hws_cmd_ft_modify_attr *ft_attr,
+                                 u32 table_id);
+
+int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev,
+                                u32 obj_id,
+                                struct mlx5hws_cmd_ft_query_attr *ft_attr,
+                                u64 *icm_addr_0, u64 *icm_addr_1);
+
+int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
+                                  u8 fw_ft_type, u32 table_id);
+
+void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
+                                         u32 table_id);
+
+int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
+                          struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
+                          u32 *rtc_id);
+
+void mlx5hws_cmd_rtc_destroy(struct mlx5_core_dev *mdev, u32 rtc_id);
+
+int mlx5hws_cmd_stc_create(struct mlx5_core_dev *mdev,
+                          struct mlx5hws_cmd_stc_create_attr *stc_attr,
+                          u32 *stc_id);
+
+int mlx5hws_cmd_stc_modify(struct mlx5_core_dev *mdev,
+                          u32 stc_id,
+                          struct mlx5hws_cmd_stc_modify_attr *stc_attr);
+
+void mlx5hws_cmd_stc_destroy(struct mlx5_core_dev *mdev, u32 stc_id);
+
+int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev,
+                            struct mlx5hws_cmd_generate_wqe_attr *attr,
+                            struct mlx5_cqe64 *ret_cqe);
+
+int mlx5hws_cmd_ste_create(struct mlx5_core_dev *mdev,
+                          struct mlx5hws_cmd_ste_create_attr *ste_attr,
+                          u32 *ste_id);
+
+void mlx5hws_cmd_ste_destroy(struct mlx5_core_dev *mdev, u32 ste_id);
+
+int mlx5hws_cmd_definer_create(struct mlx5_core_dev *mdev,
+                              struct mlx5hws_cmd_definer_create_attr *def_attr,
+                              u32 *definer_id);
+
+void mlx5hws_cmd_definer_destroy(struct mlx5_core_dev *mdev,
+                                u32 definer_id);
+
+int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
+                          u16 log_obj_range,
+                          u32 pd,
+                          u32 *arg_id);
+
+void mlx5hws_cmd_arg_destroy(struct mlx5_core_dev *mdev,
+                            u32 arg_id);
+
+int mlx5hws_cmd_header_modify_pattern_create(struct mlx5_core_dev *mdev,
+                                            u32 pattern_length,
+                                            u8 *actions,
+                                            u32 *ptrn_id);
+
+void mlx5hws_cmd_header_modify_pattern_destroy(struct mlx5_core_dev *mdev,
+                                              u32 ptrn_id);
+
+int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
+                                      struct mlx5hws_cmd_packet_reformat_create_attr *attr,
+                                      u32 *reformat_id);
+
+int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev,
+                                       u32 reformat_id);
+
+int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev,
+                       u32 table_type,
+                       u32 table_id,
+                       u32 group_id,
+                       struct mlx5hws_cmd_set_fte_attr *fte_attr);
+
+int mlx5hws_cmd_delete_fte(struct mlx5_core_dev *mdev,
+                          u32 table_type, u32 table_id);
+
+struct mlx5hws_cmd_forward_tbl *
+mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev,
+                              struct mlx5hws_cmd_ft_create_attr *ft_attr,
+                              struct mlx5hws_cmd_set_fte_attr *fte_attr);
+
+void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev,
+                                    struct mlx5hws_cmd_forward_tbl *tbl);
+
+int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
+                                struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
+                                u32 *obj_id);
+
+int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
+                                 u16 obj_type,
+                                 u32 obj_id);
+
+int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn);
+
+int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
+                          struct mlx5hws_cmd_query_caps *caps);
+
+void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
+                                          u32 fw_ft_type,
+                                          enum mlx5hws_table_type type,
+                                          struct mlx5hws_cmd_ft_modify_attr *ft_attr);
+
+int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
+                                       struct mlx5hws_cmd_allow_other_vhca_access_attr *attr);
+
+int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
+                          u16 vport_number, u16 *gvmi);
+
+#endif /* HWS_CMD_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
new file mode 100644 (file)
index 0000000..fd48b05
--- /dev/null
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved. */
+
+#include "internal.h"
+
+/* True when the device can reparse per-STC (dynamically) rather than
+ * only always/never for the whole RTC.
+ */
+bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx)
+{
+       return IS_BIT_SET(ctx->caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_BY_STC);
+}
+
+/* Select the RTC reparse mode to be used on this device */
+u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx)
+{
+       /* Prefer to use dynamic reparse, reparse only specific actions */
+       if (mlx5hws_context_cap_dynamic_reparse(ctx))
+               return MLX5_IFC_RTC_REPARSE_NEVER;
+
+       /* Otherwise use less efficient static */
+       return MLX5_IFC_RTC_REPARSE_ALWAYS;
+}
+
+/* Initialize the context-level caches (pattern, definer) and one STC
+ * pool per flow table type. On failure everything allocated here is
+ * torn down and a negative errno is returned.
+ */
+static int hws_context_pools_init(struct mlx5hws_context *ctx)
+{
+       struct mlx5hws_pool_attr pool_attr = {0};
+       u8 max_log_sz;
+       int ret;
+       int i;
+
+       ret = mlx5hws_pat_init_pattern_cache(&ctx->pattern_cache);
+       if (ret)
+               return ret;
+
+       ret = mlx5hws_definer_init_cache(&ctx->definer_cache);
+       if (ret)
+               goto uninit_pat_cache;
+
+       /* Create an STC pool per FT type */
+       pool_attr.pool_type = MLX5HWS_POOL_TYPE_STC;
+       pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STC_POOL;
+       /* Clamp the pool size to the device maximum, but never below the
+        * device allocation granularity.
+        */
+       max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
+       pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran);
+
+       for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
+               pool_attr.table_type = i;
+               ctx->stc_pool[i] = mlx5hws_pool_create(ctx, &pool_attr);
+               if (!ctx->stc_pool[i]) {
+                       mlx5hws_err(ctx, "Failed to allocate STC pool [%d]", i);
+                       ret = -ENOMEM;
+                       goto free_stc_pools;
+               }
+       }
+
+       return 0;
+
+free_stc_pools:
+       for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++)
+               if (ctx->stc_pool[i])
+                       mlx5hws_pool_destroy(ctx->stc_pool[i]);
+
+       mlx5hws_definer_uninit_cache(ctx->definer_cache);
+uninit_pat_cache:
+       mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
+       return ret;
+}
+
+/* Mirror of hws_context_pools_init(): destroy STC pools and caches */
+static void hws_context_pools_uninit(struct mlx5hws_context *ctx)
+{
+       int i;
+
+       for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
+               if (ctx->stc_pool[i])
+                       mlx5hws_pool_destroy(ctx->stc_pool[i]);
+       }
+
+       mlx5hws_definer_uninit_cache(ctx->definer_cache);
+       mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
+}
+
+/* Allocate a protection domain for the context and mark it as privately
+ * owned so that teardown knows to release it.
+ */
+static int hws_context_init_pd(struct mlx5hws_context *ctx)
+{
+       int ret = 0;
+
+       ret = mlx5_core_alloc_pd(ctx->mdev, &ctx->pd_num);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to allocate PD\n");
+               return ret;
+       }
+
+       ctx->flags |= MLX5HWS_CONTEXT_FLAG_PRIVATE_PD;
+
+       return 0;
+}
+
+/* Release the PD, but only if this context allocated it itself */
+static int hws_context_uninit_pd(struct mlx5hws_context *ctx)
+{
+       if (ctx->flags & MLX5HWS_CONTEXT_FLAG_PRIVATE_PD)
+               mlx5_core_dealloc_pd(ctx->mdev, ctx->pd_num);
+
+       return 0;
+}
+
+/* Probe the queried capabilities for everything HWS requires and set
+ * MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT only if all of them are present.
+ * On a missing capability the reason is logged and the flag stays clear,
+ * which is not treated as an error by the caller.
+ */
+static void hws_context_check_hws_supp(struct mlx5hws_context *ctx)
+{
+       struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+       /* HWS not supported on device / FW */
+       if (!caps->wqe_based_update) {
+               mlx5hws_err(ctx, "Required HWS WQE based insertion cap not supported\n");
+               return;
+       }
+
+       if (!caps->eswitch_manager) {
+               mlx5hws_err(ctx, "HWS is not supported for non eswitch manager port\n");
+               return;
+       }
+
+       /* Current solution requires all rules to set reparse bit.
+        * NOTE(review): the eswitch_manager term below is always true at
+        * this point (non-managers returned above) - kept for clarity.
+        */
+       if ((!caps->nic_ft.reparse ||
+            (!caps->fdb_ft.reparse && caps->eswitch_manager)) ||
+           !IS_BIT_SET(caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS)) {
+               mlx5hws_err(ctx, "Required HWS reparse cap not supported\n");
+               return;
+       }
+
+       /* FW/HW must support 8DW STE */
+       if (!IS_BIT_SET(caps->ste_format, MLX5_IFC_RTC_STE_FORMAT_8DW)) {
+               mlx5hws_err(ctx, "Required HWS STE format not supported\n");
+               return;
+       }
+
+       /* Adding rules by hash and by offset are requirements */
+       if (!IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH) ||
+           !IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET)) {
+               mlx5hws_err(ctx, "Required HWS RTC update mode not supported\n");
+               return;
+       }
+
+       /* Support for SELECT definer ID is required */
+       if (!IS_BIT_SET(caps->definer_format_sup, MLX5_IFC_DEFINER_FORMAT_ID_SELECT)) {
+               mlx5hws_err(ctx, "Required HWS Dynamic definer not supported\n");
+               return;
+       }
+
+       ctx->flags |= MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT;
+}
+
+/* Bring up the HWS side of the context: PD, pools/caches and send
+ * queues. A device without HWS support is not an error - the context
+ * is left usable with the HWS-support flag cleared.
+ */
+static int hws_context_init_hws(struct mlx5hws_context *ctx,
+                               struct mlx5hws_context_attr *attr)
+{
+       int ret;
+
+       hws_context_check_hws_supp(ctx);
+
+       /* Nothing to set up when HWS is not supported */
+       if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
+               return 0;
+
+       ret = hws_context_init_pd(ctx);
+       if (ret)
+               return ret;
+
+       ret = hws_context_pools_init(ctx);
+       if (ret)
+               goto uninit_pd;
+
+       if (attr->bwc)
+               ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
+
+       ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size);
+       if (ret)
+               goto pools_uninit;
+
+       INIT_LIST_HEAD(&ctx->tbl_list);
+
+       return 0;
+
+pools_uninit:
+       hws_context_pools_uninit(ctx);
+uninit_pd:
+       hws_context_uninit_pd(ctx);
+       return ret;
+}
+
+/* Mirror of hws_context_init_hws(); no-op when HWS was never enabled */
+static void hws_context_uninit_hws(struct mlx5hws_context *ctx)
+{
+       if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
+               return;
+
+       mlx5hws_send_queues_close(ctx);
+       hws_context_pools_uninit(ctx);
+       hws_context_uninit_pd(ctx);
+}
+
+/* Create and initialize an HWS context over @mdev.
+ * Returns the new context on success, or NULL on any failure (the
+ * specific error is not propagated to the caller).
+ */
+struct mlx5hws_context *mlx5hws_context_open(struct mlx5_core_dev *mdev,
+                                            struct mlx5hws_context_attr *attr)
+{
+       struct mlx5hws_context *ctx;
+       int ret;
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return NULL;
+
+       ctx->mdev = mdev;
+
+       mutex_init(&ctx->ctrl_lock);
+       xa_init(&ctx->peer_ctx_xa);
+
+       ctx->caps = kzalloc(sizeof(*ctx->caps), GFP_KERNEL);
+       if (!ctx->caps)
+               goto free_ctx;
+
+       ret = mlx5hws_cmd_query_caps(mdev, ctx->caps);
+       if (ret)
+               goto free_caps;
+
+       ret = mlx5hws_vport_init_vports(ctx);
+       if (ret)
+               goto free_caps;
+
+       ret = hws_context_init_hws(ctx, attr);
+       if (ret)
+               goto uninit_vports;
+
+       /* Best effort - debugfs setup failure does not fail the open */
+       mlx5hws_debug_init_dump(ctx);
+
+       return ctx;
+
+uninit_vports:
+       mlx5hws_vport_uninit_vports(ctx);
+free_caps:
+       kfree(ctx->caps);
+free_ctx:
+       xa_destroy(&ctx->peer_ctx_xa);
+       mutex_destroy(&ctx->ctrl_lock);
+       kfree(ctx);
+       return NULL;
+}
+
+/* Tear down everything created by mlx5hws_context_open() and free the
+ * context. Always returns 0.
+ */
+int mlx5hws_context_close(struct mlx5hws_context *ctx)
+{
+       mlx5hws_debug_uninit_dump(ctx);
+       hws_context_uninit_hws(ctx);
+       mlx5hws_vport_uninit_vports(ctx);
+       kfree(ctx->caps);
+       xa_destroy(&ctx->peer_ctx_xa);
+       mutex_destroy(&ctx->ctrl_lock);
+       kfree(ctx);
+       return 0;
+}
+
+/* Register (or replace) the HWS context of a peer device, keyed by its
+ * vHCA ID. A failed xarray store is only logged; the peer is then
+ * simply not tracked.
+ */
+void mlx5hws_context_set_peer(struct mlx5hws_context *ctx,
+                             struct mlx5hws_context *peer_ctx,
+                             u16 peer_vhca_id)
+{
+       mutex_lock(&ctx->ctrl_lock);
+
+       if (xa_err(xa_store(&ctx->peer_ctx_xa, peer_vhca_id, peer_ctx, GFP_KERNEL)))
+               pr_warn("HWS: failed storing peer vhca ID in peer xarray\n");
+
+       mutex_unlock(&ctx->ctrl_lock);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
new file mode 100644 (file)
index 0000000..47f5cc8
--- /dev/null
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_CONTEXT_H_
+#define HWS_CONTEXT_H_
+
+/* Runtime state flags of an HWS context */
+enum mlx5hws_context_flags {
+       MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0, /* device/FW supports HWS */
+       MLX5HWS_CONTEXT_FLAG_PRIVATE_PD = 1 << 1, /* PD was allocated by the context */
+       MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT = 1 << 2, /* BWC API requested at open */
+};
+
+/* Index of per-context shared STC resources (see common_res.shared_stc) */
+enum mlx5hws_context_shared_stc_type {
+       MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3 = 0,
+       MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP = 1,
+       MLX5HWS_CONTEXT_SHARED_STC_MAX = 2,
+};
+
+struct mlx5hws_context_common_res {
+       struct mlx5hws_action_default_stc *default_stc;
+       struct mlx5hws_action_shared_stc *shared_stc[MLX5HWS_CONTEXT_SHARED_STC_MAX];
+       struct mlx5hws_cmd_forward_tbl *default_miss;
+};
+
+struct mlx5hws_context_debug_info {
+       struct dentry *steering_debugfs;
+       struct dentry *fdb_debugfs;
+};
+
+struct mlx5hws_context_vports {
+       u16 esw_manager_gvmi;
+       u16 uplink_gvmi;
+       struct xarray vport_gvmi_xa;
+};
+
+/* Top-level HWS (hardware steering) context, one per core device */
+struct mlx5hws_context {
+       struct mlx5_core_dev *mdev;
+       struct mlx5hws_cmd_query_caps *caps; /* cached device capabilities */
+       u32 pd_num; /* PD allocated in hws_context_init_pd() */
+       struct mlx5hws_pool *stc_pool[MLX5HWS_TABLE_TYPE_MAX];
+       struct mlx5hws_context_common_res common_res[MLX5HWS_TABLE_TYPE_MAX];
+       struct mlx5hws_pattern_cache *pattern_cache;
+       struct mlx5hws_definer_cache *definer_cache;
+       struct mutex ctrl_lock; /* control lock to protect the whole context */
+       enum mlx5hws_context_flags flags;
+       struct mlx5hws_send_engine *send_queue;
+       size_t queues; /* number of send queues */
+       struct mutex *bwc_send_queue_locks; /* protect BWC queues */
+       struct lock_class_key *bwc_lock_class_keys;
+       struct list_head tbl_list; /* tables created on this context */
+       struct mlx5hws_context_debug_info debug_info;
+       struct xarray peer_ctx_xa; /* peer contexts indexed by vHCA ID */
+       struct mlx5hws_context_vports vports;
+};
+
+/* True when the context was opened with the BWC API enabled */
+static inline bool mlx5hws_context_bwc_supported(struct mlx5hws_context *ctx)
+{
+       return ctx->flags & MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
+}
+
+bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx);
+
+u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx);
+
+#endif /* HWS_CONTEXT_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
new file mode 100644 (file)
index 0000000..5b200b4
--- /dev/null
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/version.h>
+#include "internal.h"
+
+/* Dump one definer record: CSV header fields, then the DW selectors, the
+ * byte selectors and the jumbo mask bytes. Selector lists are separated
+ * with '-' and terminated (on the last element) with ','.
+ */
+static int
+hws_debug_dump_matcher_template_definer(struct seq_file *f,
+                                       void *parent_obj,
+                                       struct mlx5hws_definer *definer,
+                                       enum mlx5hws_debug_res_type type)
+{
+       int i;
+
+       /* A template may have no definer of this kind - nothing to dump */
+       if (!definer)
+               return 0;
+
+       seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,",
+                  type,
+                  HWS_PTR_TO_ID(definer),
+                  HWS_PTR_TO_ID(parent_obj),
+                  definer->obj_id,
+                  definer->type);
+
+       for (i = 0; i < DW_SELECTORS; i++)
+               seq_printf(f, "0x%x%s", definer->dw_selector[i],
+                          (i == DW_SELECTORS - 1) ? "," : "-");
+
+       for (i = 0; i < BYTE_SELECTORS; i++)
+               seq_printf(f, "0x%x%s", definer->byte_selector[i],
+                          (i == BYTE_SELECTORS - 1) ? "," : "-");
+
+       for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++)
+               seq_printf(f, "%02x", definer->mask.jumbo[i]);
+
+       seq_puts(f, "\n");
+
+       return 0;
+}
+
+/* Dump every match template of the matcher, each followed by its match
+ * definer record.
+ */
+static int
+hws_debug_dump_matcher_match_template(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+       enum mlx5hws_debug_res_type type;
+       int i, ret;
+
+       for (i = 0; i < matcher->num_of_mt; i++) {
+               struct mlx5hws_match_template *mt = &matcher->mt[i];
+
+               seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d\n",
+                          MLX5HWS_DEBUG_RES_TYPE_MATCHER_MATCH_TEMPLATE,
+                          HWS_PTR_TO_ID(mt),
+                          HWS_PTR_TO_ID(matcher),
+                          mt->fc_sz,
+                          0, 0);
+
+               type = MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_MATCH_DEFINER;
+               ret = hws_debug_dump_matcher_template_definer(f, mt, mt->definer, type);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* Dump every action template of the matcher, appending the textual name
+ * of each action type to the record.
+ */
+static int
+hws_debug_dump_matcher_action_template(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+       enum mlx5hws_action_type action_type;
+       int i, j;
+
+       for (i = 0; i < matcher->num_of_at; i++) {
+               struct mlx5hws_action_template *at = &matcher->at[i];
+
+               seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d",
+                          MLX5HWS_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE,
+                          HWS_PTR_TO_ID(at),
+                          HWS_PTR_TO_ID(matcher),
+                          at->only_term,
+                          at->num_of_action_stes,
+                          at->num_actions);
+
+               for (j = 0; j < at->num_actions; j++) {
+                       action_type = at->action_type_arr[j];
+                       seq_printf(f, ",%s", mlx5hws_action_type_to_str(action_type));
+               }
+
+               seq_puts(f, "\n");
+       }
+
+       return 0;
+}
+
+/* Dump the matcher attributes: priority, mode, table row/column log sizes
+ * and the various optimization/insert/distribute knobs.
+ */
+static int
+hws_debug_dump_matcher_attr(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+       struct mlx5hws_matcher_attr *attr = &matcher->attr;
+
+       seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d\n",
+                  MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR,
+                  HWS_PTR_TO_ID(matcher),
+                  attr->priority,
+                  attr->mode,
+                  attr->table.sz_row_log,
+                  attr->table.sz_col_log,
+                  attr->optimize_using_rule_idx,
+                  attr->optimize_flow_src,
+                  attr->insert_mode,
+                  attr->distribute_mode);
+
+       return 0;
+}
+
+/* Dump a single matcher: the matcher record itself (match/action RTC and
+ * STE ids plus the ICM addresses of its end flow table), then its
+ * attributes, match templates and action templates.
+ */
+static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+       enum mlx5hws_table_type tbl_type = matcher->tbl->type;
+       struct mlx5hws_cmd_ft_query_attr ft_attr = {0};
+       struct mlx5hws_pool_chunk *ste;
+       struct mlx5hws_pool *ste_pool;
+       u64 icm_addr_0 = 0;
+       u64 icm_addr_1 = 0;
+       u32 ste_0_id = -1; /* -1 is dumped when there is no STE pool */
+       u32 ste_1_id = -1;
+       int ret;
+
+       seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,0x%llx",
+                  MLX5HWS_DEBUG_RES_TYPE_MATCHER,
+                  HWS_PTR_TO_ID(matcher),
+                  HWS_PTR_TO_ID(matcher->tbl),
+                  matcher->num_of_mt,
+                  matcher->end_ft_id,
+                  matcher->col_matcher ? HWS_PTR_TO_ID(matcher->col_matcher) : 0);
+
+       ste = &matcher->match_ste.ste;
+       ste_pool = matcher->match_ste.pool;
+       if (ste_pool) {
+               ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+               /* mirror STE exists only for FDB tables */
+               if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
+                       ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+       }
+
+       seq_printf(f, ",%d,%d,%d,%d",
+                  matcher->match_ste.rtc_0_id,
+                  (int)ste_0_id,
+                  matcher->match_ste.rtc_1_id,
+                  (int)ste_1_id);
+
+       /* only the first action STE set is dumped */
+       ste = &matcher->action_ste[0].ste;
+       ste_pool = matcher->action_ste[0].pool;
+       if (ste_pool) {
+               ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+               if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
+                       ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+               else
+                       ste_1_id = -1;
+       } else {
+               ste_0_id = -1;
+               ste_1_id = -1;
+       }
+
+       ft_attr.type = matcher->tbl->fw_ft_type;
+       ret = mlx5hws_cmd_flow_table_query(matcher->tbl->ctx->mdev,
+                                          matcher->end_ft_id,
+                                          &ft_attr,
+                                          &icm_addr_0,
+                                          &icm_addr_1);
+       if (ret)
+               return ret;
+
+       seq_printf(f, ",%d,%d,%d,%d,%d,0x%llx,0x%llx\n",
+                  matcher->action_ste[0].rtc_0_id,
+                  (int)ste_0_id,
+                  matcher->action_ste[0].rtc_1_id,
+                  (int)ste_1_id,
+                  0,
+                  mlx5hws_debug_icm_to_idx(icm_addr_0),
+                  mlx5hws_debug_icm_to_idx(icm_addr_1));
+
+       ret = hws_debug_dump_matcher_attr(f, matcher);
+       if (ret)
+               return ret;
+
+       ret = hws_debug_dump_matcher_match_template(f, matcher);
+       if (ret)
+               return ret;
+
+       ret = hws_debug_dump_matcher_action_template(f, matcher);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/* Dump a table record (flow table ids, type, level and ICM addresses),
+ * then recurse into every matcher attached to it.
+ */
+static int hws_debug_dump_table(struct seq_file *f, struct mlx5hws_table *tbl)
+{
+       struct mlx5hws_cmd_ft_query_attr ft_attr = {0};
+       struct mlx5hws_matcher *matcher;
+       u64 local_icm_addr_0 = 0; /* always 0 - kept as dump-format placeholders */
+       u64 local_icm_addr_1 = 0;
+       u64 icm_addr_0 = 0;
+       u64 icm_addr_1 = 0;
+       int ret;
+
+       seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d,%d,%d",
+                  MLX5HWS_DEBUG_RES_TYPE_TABLE,
+                  HWS_PTR_TO_ID(tbl),
+                  HWS_PTR_TO_ID(tbl->ctx),
+                  tbl->ft_id,
+                  MLX5HWS_TABLE_TYPE_BASE + tbl->type,
+                  tbl->fw_ft_type,
+                  tbl->level,
+                  0);
+
+       ft_attr.type = tbl->fw_ft_type;
+       ret = mlx5hws_cmd_flow_table_query(tbl->ctx->mdev,
+                                          tbl->ft_id,
+                                          &ft_attr,
+                                          &icm_addr_0,
+                                          &icm_addr_1);
+       if (ret)
+               return ret;
+
+       seq_printf(f, ",0x%llx,0x%llx,0x%llx,0x%llx,0x%llx\n",
+                  mlx5hws_debug_icm_to_idx(icm_addr_0),
+                  mlx5hws_debug_icm_to_idx(icm_addr_1),
+                  mlx5hws_debug_icm_to_idx(local_icm_addr_0),
+                  mlx5hws_debug_icm_to_idx(local_icm_addr_1),
+                  HWS_PTR_TO_ID(tbl->default_miss.miss_tbl));
+
+       list_for_each_entry(matcher, &tbl->matchers_list, list_node) {
+               ret = hws_debug_dump_matcher(f, matcher);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* Dump one record per send queue (engine) and one per its send ring
+ * (CQ/SQ numbers); there is exactly one send ring per queue.
+ */
+static int
+hws_debug_dump_context_send_engine(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+       struct mlx5hws_send_engine *send_queue;
+       struct mlx5hws_send_ring *send_ring;
+       struct mlx5hws_send_ring_cq *cq;
+       struct mlx5hws_send_ring_sq *sq;
+       int i;
+
+       for (i = 0; i < (int)ctx->queues; i++) {
+               send_queue = &ctx->send_queue[i];
+               seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+                          MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_ENGINE,
+                          HWS_PTR_TO_ID(ctx),
+                          i,
+                          send_queue->used_entries,
+                          send_queue->num_entries,
+                          1, /* one send ring per queue */
+                          send_queue->num_entries,
+                          send_queue->err,
+                          send_queue->completed.ci,
+                          send_queue->completed.pi,
+                          send_queue->completed.mask);
+
+               send_ring = &send_queue->send_ring;
+               cq = &send_ring->send_cq;
+               sq = &send_ring->send_sq;
+
+               /* zeros are placeholders for fields not tracked by HWS */
+               seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+                          MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_RING,
+                          HWS_PTR_TO_ID(ctx),
+                          0, /* one send ring per send queue */
+                          i,
+                          cq->mcq.cqn,
+                          0,
+                          0,
+                          0,
+                          0,
+                          0,
+                          0,
+                          cq->mcq.cqe_sz,
+                          sq->sqn,
+                          0,
+                          0,
+                          0);
+       }
+
+       return 0;
+}
+
+/* Dump the queried device capabilities of the context as one CSV record
+ * (written in two seq_printf calls).
+ */
+static int hws_debug_dump_context_caps(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+       struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+       seq_printf(f, "%d,0x%llx,%s,%d,%d,%d,%d,",
+                  MLX5HWS_DEBUG_RES_TYPE_CONTEXT_CAPS,
+                  HWS_PTR_TO_ID(ctx),
+                  caps->fw_ver,
+                  caps->wqe_based_update,
+                  caps->ste_format,
+                  caps->ste_alloc_log_max,
+                  caps->log_header_modify_argument_max_alloc);
+
+       seq_printf(f, "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%s\n",
+                  caps->flex_protocols,
+                  caps->rtc_reparse_mode,
+                  caps->rtc_index_mode,
+                  caps->ste_alloc_log_gran,
+                  caps->stc_alloc_log_max,
+                  caps->stc_alloc_log_gran,
+                  caps->rtc_log_depth_max,
+                  caps->format_select_gtpu_dw_0,
+                  caps->format_select_gtpu_dw_1,
+                  caps->format_select_gtpu_dw_2,
+                  caps->format_select_gtpu_ext_dw_0,
+                  caps->nic_ft.max_level,
+                  caps->nic_ft.reparse,
+                  caps->fdb_ft.max_level,
+                  caps->fdb_ft.reparse,
+                  caps->log_header_modify_argument_granularity,
+                  caps->linear_match_definer,
+                  "regc_3");
+
+       return 0;
+}
+
+/* Dump context creation attributes: PD number, queue count/size and vhca id */
+static int hws_debug_dump_context_attr(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+       /* num_entries is read from queue 0; presumably all queues are
+        * created with the same size - TODO confirm against queue setup.
+        */
+       seq_printf(f, "%u,0x%llx,%d,%zu,%d,%s,%d,%d\n",
+                  MLX5HWS_DEBUG_RES_TYPE_CONTEXT_ATTR,
+                  HWS_PTR_TO_ID(ctx),
+                  ctx->pd_num,
+                  ctx->queues,
+                  ctx->send_queue->num_entries,
+                  "None", /* no shared gvmi */
+                  ctx->caps->vhca_id,
+                  0xffff); /* no shared gvmi */
+
+       return 0;
+}
+
+/* Dump the context header record (HWS-support flag, PCI device name, dump
+ * format version and kernel version), then the context attributes and caps.
+ */
+static int hws_debug_dump_context_info(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+       struct mlx5_core_dev *dev = ctx->mdev;
+       int ret;
+
+       seq_printf(f, "%d,0x%llx,%d,%s,%s.KERNEL_%u_%u_%u\n",
+                  MLX5HWS_DEBUG_RES_TYPE_CONTEXT,
+                  HWS_PTR_TO_ID(ctx),
+                  ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT,
+                  pci_name(dev->pdev),
+                  HWS_DEBUG_FORMAT_VERSION,
+                  LINUX_VERSION_MAJOR,
+                  LINUX_VERSION_PATCHLEVEL,
+                  LINUX_VERSION_SUBLEVEL);
+
+       ret = hws_debug_dump_context_attr(f, ctx);
+       if (ret)
+               return ret;
+
+       ret = hws_debug_dump_context_caps(f, ctx);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/* Dump one STC pool resource record: table type and the resource base id */
+static int hws_debug_dump_context_stc_resource(struct seq_file *f,
+                                              struct mlx5hws_context *ctx,
+                                              u32 tbl_type,
+                                              struct mlx5hws_pool_resource *resource)
+{
+       seq_printf(f, "%d,0x%llx,%u,%u\n",
+                  MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC,
+                  HWS_PTR_TO_ID(ctx),
+                  tbl_type,
+                  resource->base_id);
+
+       return 0;
+}
+
+/* Dump the STC pools of all table types; only resource index 0 (and, for
+ * FDB, mirror resource index 0) is dumped.
+ */
+static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+       struct mlx5hws_pool *stc_pool;
+       u32 table_type;
+       int ret;
+       int i;
+
+       for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
+               stc_pool = ctx->stc_pool[i];
+               table_type = MLX5HWS_TABLE_TYPE_BASE + i;
+
+               if (!stc_pool)
+                       continue;
+
+               if (stc_pool->resource[0]) {
+                       ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
+                                                                 stc_pool->resource[0]);
+                       if (ret)
+                               return ret;
+               }
+
+               if (i == MLX5HWS_TABLE_TYPE_FDB && stc_pool->mirror_resource[0]) {
+                       ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
+                                                                 stc_pool->mirror_resource[0]);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       return 0;
+}
+
+/* Dump the whole context: info/attr/caps, send engines, STC pools and
+ * every table (which recurses into matchers and templates).
+ */
+static int hws_debug_dump_context(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+       struct mlx5hws_table *tbl;
+       int ret;
+
+       ret = hws_debug_dump_context_info(f, ctx);
+       if (ret)
+               return ret;
+
+       ret = hws_debug_dump_context_send_engine(f, ctx);
+       if (ret)
+               return ret;
+
+       ret = hws_debug_dump_context_stc(f, ctx);
+       if (ret)
+               return ret;
+
+       list_for_each_entry(tbl, &ctx->tbl_list, tbl_list_node) {
+               ret = hws_debug_dump_table(f, tbl);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* Top-level dump entry: serialize against context changes via ctrl_lock
+ * while the entire dump is generated.
+ */
+static int
+hws_debug_dump(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+       int ret;
+
+       if (!f || !ctx)
+               return -EINVAL;
+
+       mutex_lock(&ctx->ctrl_lock);
+       ret = hws_debug_dump_context(f, ctx);
+       mutex_unlock(&ctx->ctrl_lock);
+
+       return ret;
+}
+
+/* seq_file show callback; file->private is the mlx5hws_context pointer
+ * passed to debugfs_create_file().
+ */
+static int hws_dump_show(struct seq_file *file, void *priv)
+{
+       return hws_debug_dump(file, file->private);
+}
+DEFINE_SHOW_ATTRIBUTE(hws_dump);
+
+/* Create the debugfs dump entry for the context:
+ * <dev debugfs root>/steering/fdb/ctx_<ptr>, readable (0444); reading it
+ * triggers hws_dump_show(). Per debugfs convention, creation results are
+ * not checked - the API tolerates error pointers on subsequent calls.
+ */
+void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx)
+{
+       struct mlx5_core_dev *dev = ctx->mdev;
+       char file_name[128];
+
+       ctx->debug_info.steering_debugfs =
+               debugfs_create_dir("steering", mlx5_debugfs_get_dev_root(dev));
+       ctx->debug_info.fdb_debugfs =
+               debugfs_create_dir("fdb", ctx->debug_info.steering_debugfs);
+
+       /* snprintf rather than sprintf: bound the write to the buffer */
+       snprintf(file_name, sizeof(file_name), "ctx_%p", ctx);
+       debugfs_create_file(file_name, 0444, ctx->debug_info.fdb_debugfs,
+                           ctx, &hws_dump_fops);
+}
+
+/* Remove the whole steering debugfs subtree created by
+ * mlx5hws_debug_init_dump().
+ */
+void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx)
+{
+       debugfs_remove_recursive(ctx->debug_info.steering_debugfs);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h
new file mode 100644 (file)
index 0000000..e44e7ae
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_DEBUG_H_
+#define HWS_DEBUG_H_
+
+#define HWS_DEBUG_FORMAT_VERSION "1.0"
+
+#define HWS_PTR_TO_ID(p) ((u64)(uintptr_t)(p) & 0xFFFFFFFFULL)
+
+/* Record-type tags written as the first CSV field of every dump line;
+ * grouped by object class (context 40xx, table 41xx, matcher 42xx).
+ */
+enum mlx5hws_debug_res_type {
+       MLX5HWS_DEBUG_RES_TYPE_CONTEXT = 4000,
+       MLX5HWS_DEBUG_RES_TYPE_CONTEXT_ATTR = 4001,
+       MLX5HWS_DEBUG_RES_TYPE_CONTEXT_CAPS = 4002,
+       MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_ENGINE = 4003,
+       MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_RING = 4004,
+       MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC = 4005,
+
+       MLX5HWS_DEBUG_RES_TYPE_TABLE = 4100,
+
+       MLX5HWS_DEBUG_RES_TYPE_MATCHER = 4200,
+       MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR = 4201,
+       MLX5HWS_DEBUG_RES_TYPE_MATCHER_MATCH_TEMPLATE = 4202,
+       MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_MATCH_DEFINER = 4203,
+       MLX5HWS_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE = 4204,
+       MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_HASH_DEFINER = 4205,
+       MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_RANGE_DEFINER = 4206,
+       MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_COMPARE_MATCH_DEFINER = 4207,
+};
+
+/* Convert an ICM address to its 64-byte-chunk index (>> 6), truncated to
+ * 32 bits for the dump format.
+ */
+static inline u64
+mlx5hws_debug_icm_to_idx(u64 icm_addr)
+{
+       return (icm_addr >> 6) & 0xffffffff;
+}
+
+void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx);
+void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx);
+
+#endif /* HWS_DEBUG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
new file mode 100644 (file)
index 0000000..8fe96eb
--- /dev/null
@@ -0,0 +1,2146 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+/* Pattern tunnel Layer bits. */
+#define MLX5_FLOW_LAYER_VXLAN      BIT(12)
+#define MLX5_FLOW_LAYER_VXLAN_GPE  BIT(13)
+#define MLX5_FLOW_LAYER_GRE        BIT(14)
+#define MLX5_FLOW_LAYER_MPLS       BIT(15)
+
+/* Pattern tunnel Layer bits (continued). */
+#define MLX5_FLOW_LAYER_IPIP       BIT(23)
+#define MLX5_FLOW_LAYER_IPV6_ENCAP BIT(24)
+#define MLX5_FLOW_LAYER_NVGRE      BIT(25)
+#define MLX5_FLOW_LAYER_GENEVE     BIT(26)
+
+#define MLX5_FLOW_ITEM_FLEX_TUNNEL BIT_ULL(39)
+
+/* Tunnel Masks. */
+#define MLX5_FLOW_LAYER_TUNNEL \
+       (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
+        MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
+        MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
+        MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
+        MLX5_FLOW_ITEM_FLEX_TUNNEL)
+
+#define GTP_PDU_SC     0x85
+#define BAD_PORT       0xBAD
+#define ETH_TYPE_IPV4_VXLAN    0x0800
+#define ETH_TYPE_IPV6_VXLAN    0x86DD
+#define UDP_GTPU_PORT  2152
+#define UDP_PORT_MPLS  6635
+#define UDP_GENEVE_PORT 6081
+#define UDP_ROCEV2_PORT        4791
+#define HWS_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS)
+
+#define STE_NO_VLAN    0x0
+#define STE_SVLAN      0x1
+#define STE_CVLAN      0x2
+#define STE_NO_L3      0x0
+#define STE_IPV4       0x1
+#define STE_IPV6       0x2
+#define STE_NO_L4      0x0
+#define STE_TCP                0x1
+#define STE_UDP                0x2
+#define STE_ICMP       0x3
+#define STE_ESP                0x3
+
+#define IPV4 0x4
+#define IPV6 0x6
+
+/* Setter function based on bit offset and mask, for 32bit DW */
+#define _HWS_SET32(p, v, byte_off, bit_off, mask) \
+       do { \
+               u32 _v = v; \
+               *((__be32 *)(p) + ((byte_off) / 4)) = \
+               cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + \
+                            ((byte_off) / 4))) & \
+                            (~((mask) << (bit_off)))) | \
+                           (((_v) & (mask)) << \
+                             (bit_off))); \
+       } while (0)
+
+/* Setter function based on bit offset and mask, for unaligned 32bit DW.
+ * A negative bit_off means the field straddles a DW boundary: the high
+ * (mask >> -bit_off) bits are written into this DW at bit 0 and the
+ * remaining low bits into the following DW.
+ * NOTE(review): for the second write, (bit_off) % BITS_IN_DW stays
+ * negative in C; the result appears to rely on the CPU masking the shift
+ * count so that it lands on bit (BITS_IN_DW + bit_off) - confirm, as a
+ * negative shift count is formally undefined behavior.
+ */
+#define HWS_SET32(p, v, byte_off, bit_off, mask) \
+       do { \
+               if (unlikely((bit_off) < 0)) { \
+                       u32 _bit_off = -1 * (bit_off); \
+                       u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \
+                       _HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
+                       _HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
+                                   (bit_off) % BITS_IN_DW, second_dw_mask); \
+               } else { \
+                       _HWS_SET32(p, v, byte_off, (bit_off), (mask)); \
+               } \
+       } while (0)
+
+/* Getter for up to aligned 32bit DW */
+#define HWS_GET32(p, byte_off, bit_off, mask) \
+       ((be32_to_cpu(*((__be32 *)(p) + ((byte_off) / 4))) >> (bit_off)) & (mask))
+
+#define HWS_CALC_FNAME(field, inner) \
+       ((inner) ? MLX5HWS_DEFINER_FNAME_##field##_I : \
+                  MLX5HWS_DEFINER_FNAME_##field##_O)
+
+#define HWS_GET_MATCH_PARAM(match_param, hdr) \
+       MLX5_GET(fte_match_param, match_param, hdr)
+
+#define HWS_IS_FLD_SET(match_param, hdr) \
+       (!!(HWS_GET_MATCH_PARAM(match_param, hdr)))
+
+#define HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) ({ \
+               BUILD_BUG_ON((sz_in_bits) % 32); \
+               u32 sz = sz_in_bits; \
+               u32 res = 0; \
+               u32 dw_off = __mlx5_dw_off(fte_match_param, hdr); \
+               while (!res && sz >= 32) { \
+                       res = *((match_param) + (dw_off++)); \
+                       sz -= 32; \
+               } \
+               res; \
+       })
+
+#define HWS_IS_FLD_SET_SZ(match_param, hdr, sz_in_bits) \
+       (((sz_in_bits) > 32) ? HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) : \
+                              !!(HWS_GET_MATCH_PARAM(match_param, hdr)))
+
+#define HWS_GET64_MATCH_PARAM(match_param, hdr) \
+       MLX5_GET64(fte_match_param, match_param, hdr)
+
+#define HWS_IS_FLD64_SET(match_param, hdr) \
+       (!!(HWS_GET64_MATCH_PARAM(match_param, hdr)))
+
+#define HWS_CALC_HDR_SRC(fc, s_hdr) \
+       do { \
+               (fc)->s_bit_mask = __mlx5_mask(fte_match_param, s_hdr); \
+               (fc)->s_bit_off = __mlx5_dw_bit_off(fte_match_param, s_hdr); \
+               (fc)->s_byte_off = MLX5_BYTE_OFF(fte_match_param, s_hdr); \
+       } while (0)
+
+#define HWS_CALC_HDR_DST(fc, d_hdr) \
+       do { \
+               (fc)->bit_mask = __mlx5_mask(definer_hl, d_hdr); \
+               (fc)->bit_off = __mlx5_dw_bit_off(definer_hl, d_hdr); \
+               (fc)->byte_off = MLX5_BYTE_OFF(definer_hl, d_hdr); \
+       } while (0)
+
+#define HWS_CALC_HDR(fc, s_hdr, d_hdr) \
+       do { \
+               HWS_CALC_HDR_SRC(fc, s_hdr); \
+               HWS_CALC_HDR_DST(fc, d_hdr); \
+               (fc)->tag_set = &hws_definer_generic_set; \
+       } while (0)
+
+#define HWS_SET_HDR(fc_arr, match_param, fname, s_hdr, d_hdr) \
+       do { \
+               if (HWS_IS_FLD_SET(match_param, s_hdr)) \
+                       HWS_CALC_HDR(&(fc_arr)[MLX5HWS_DEFINER_FNAME_##fname], s_hdr, d_hdr); \
+       } while (0)
+
+/* Book-keeping used while choosing definer selectors: how many full-DW,
+ * limited-DW and byte selectors are allowed vs. already used, and which
+ * offsets were picked for each.
+ */
+struct mlx5hws_definer_sel_ctrl {
+       u8 allowed_full_dw; /* Full DW selectors cover all offsets */
+       u8 allowed_lim_dw;  /* Limited DW selectors cover offset < 64 */
+       u8 allowed_bytes;   /* Bytes selectors, up to offset 255 */
+       u8 used_full_dw;
+       u8 used_lim_dw;
+       u8 used_bytes;
+       u8 full_dw_selector[DW_SELECTORS];
+       u8 lim_dw_selector[DW_SELECTORS_LIMITED];
+       u8 byte_selector[BYTE_SELECTORS];
+};
+
+/* State carried through match-parameter-to-definer conversion: the context,
+ * the field-copy descriptor array being filled, and accumulated match flags.
+ */
+struct mlx5hws_definer_conv_data {
+       struct mlx5hws_context *ctx;
+       struct mlx5hws_definer_fc *fc;
+       /* enum mlx5hws_definer_match_flag */
+       u32 match_flags;
+};
+
+/* tag_set callback that writes all-ones into the field; match_param is
+ * intentionally unused.
+ */
+static void
+hws_definer_ones_set(struct mlx5hws_definer_fc *fc,
+                    void *match_param,
+                    u8 *tag)
+{
+       HWS_SET32(tag, -1, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+/* Generic tag_set callback: copy the field from match_param (source
+ * offsets s_*) into the tag at the destination offsets.
+ */
+static void
+hws_definer_generic_set(struct mlx5hws_definer_fc *fc,
+                       void *match_param,
+                       u8 *tag)
+{
+       /* Can be optimized */
+       u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);
+
+       HWS_SET32(tag, val, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+/* Encode the outer first-VLAN type (CVLAN/SVLAN/none) as an STE value */
+static void
+hws_definer_outer_vlan_type_set(struct mlx5hws_definer_fc *fc,
+                               void *match_param,
+                               u8 *tag)
+{
+       if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag))
+               HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+       else if (HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag))
+               HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+       else
+               HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+/* Encode the inner first-VLAN type (CVLAN/SVLAN/none) as an STE value */
+static void
+hws_definer_inner_vlan_type_set(struct mlx5hws_definer_fc *fc,
+                               void *match_param,
+                               u8 *tag)
+{
+       if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag))
+               HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+       else if (HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag))
+               HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+       else
+               HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+/* Encode the second-VLAN type, reading the inner or outer misc parameters
+ * depending on @inner.
+ */
+static void
+hws_definer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
+                                void *match_param,
+                                u8 *tag,
+                                bool inner)
+{
+       u32 second_cvlan_tag = inner ?
+               HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) :
+               HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag);
+       u32 second_svlan_tag = inner ?
+               HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag) :
+               HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag);
+
+       if (second_cvlan_tag)
+               HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+       else if (second_svlan_tag)
+               HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+       else
+               HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+/* tag_set wrapper: second-VLAN type of the inner headers */
+static void
+hws_definer_inner_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
+                                      void *match_param,
+                                      u8 *tag)
+{
+       hws_definer_second_vlan_type_set(fc, match_param, tag, true);
+}
+
+/* tag_set wrapper: second-VLAN type of the outer headers */
+static void
+hws_definer_outer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
+                                      void *match_param,
+                                      u8 *tag)
+{
+       hws_definer_second_vlan_type_set(fc, match_param, tag, false);
+}
+
+/* Pack ICMP type and code into a single DW laid out per header_icmp */
+static void hws_definer_icmp_dw1_set(struct mlx5hws_definer_fc *fc,
+                                    void *match_param,
+                                    u8 *tag)
+{
+       u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_code);
+       u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_type);
+       u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
+                (code << __mlx5_dw_bit_off(header_icmp, code));
+
+       HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+/* Pack ICMPv6 type and code into a single DW; uses the same header_icmp
+ * DW layout as the ICMPv4 variant.
+ */
+static void
+hws_definer_icmpv6_dw1_set(struct mlx5hws_definer_fc *fc,
+                          void *match_param,
+                          u8 *tag)
+{
+       u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_code);
+       u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_type);
+       u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
+                (code << __mlx5_dw_bit_off(header_icmp, code));
+
+       HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+/* Map the IP version field (4/6) to the STE L3 type encoding; any other
+ * value is encoded as STE_NO_L3.
+ */
+static void
+hws_definer_l3_type_set(struct mlx5hws_definer_fc *fc,
+                       void *match_param,
+                       u8 *tag)
+{
+       u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);
+
+       if (val == IPV4)
+               HWS_SET32(tag, STE_IPV4, fc->byte_off, fc->bit_off, fc->bit_mask);
+       else if (val == IPV6)
+               HWS_SET32(tag, STE_IPV6, fc->byte_off, fc->bit_off, fc->bit_mask);
+       else
+               HWS_SET32(tag, STE_NO_L3, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+/* Resolve the match source_port to its GVMI on @peer_ctx and write it to
+ * the tag; on lookup failure writes BAD_PORT so the rule cannot match a
+ * valid port.
+ */
+static void
+hws_definer_set_source_port_gvmi(struct mlx5hws_definer_fc *fc,
+                                void *match_param,
+                                u8 *tag,
+                                struct mlx5hws_context *peer_ctx)
+{
+       u16 source_port = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port);
+       u16 vport_gvmi = 0;
+       int ret;
+
+       ret = mlx5hws_vport_get_gvmi(peer_ctx, source_port, &vport_gvmi);
+       if (ret) {
+               HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
+               mlx5hws_err(fc->ctx, "Vport 0x%x is disabled or invalid\n", source_port);
+               return;
+       }
+
+       /* a zero GVMI is not written - TODO confirm intended semantics */
+       if (vport_gvmi)
+               HWS_SET32(tag, vport_gvmi, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+/* Resolve the source eswitch-owner vhca_id to its HWS context (local or a
+ * registered peer) and then set the source-port GVMI through it. The
+ * peer_ctx_xa lookup is protected by ctrl_lock (see __must_hold below);
+ * an unknown vhca_id results in BAD_PORT.
+ */
+static void
+hws_definer_set_source_gvmi_vhca_id(struct mlx5hws_definer_fc *fc,
+                                   void *match_param,
+                                   u8 *tag)
+__must_hold(&fc->ctx->ctrl_lock)
+{
+       int id = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_eswitch_owner_vhca_id);
+       struct mlx5hws_context *peer_ctx;
+
+       if (id == fc->ctx->caps->vhca_id)
+               peer_ctx = fc->ctx;
+       else
+               peer_ctx = xa_load(&fc->ctx->peer_ctx_xa, id);
+
+       if (!peer_ctx) {
+               HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
+               mlx5hws_err(fc->ctx, "Invalid vhca_id provided 0x%x\n", id);
+               return;
+       }
+
+       hws_definer_set_source_port_gvmi(fc, match_param, tag, peer_ctx);
+}
+
+/* tag_set wrapper: resolve source-port GVMI against the local context */
+static void
+hws_definer_set_source_gvmi(struct mlx5hws_definer_fc *fc,
+                           void *match_param,
+                           u8 *tag)
+{
+       hws_definer_set_source_port_gvmi(fc, match_param, tag, fc->ctx);
+}
+
+/* Set up the field-copy entry for the flex-parser "steering ok" bit of
+ * @parser_id (0..7), mapping it to the matching oks1 destination field.
+ * Returns the configured entry, or NULL for an out-of-range parser id.
+ */
+static struct mlx5hws_definer_fc *
+hws_definer_flex_parser_steering_ok_bits_handler(struct mlx5hws_definer_conv_data *cd,
+                                                u8 parser_id)
+{
+       struct mlx5hws_definer_fc *fc;
+
+       switch (parser_id) {
+       case 0:
+               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK];
+               HWS_CALC_HDR_DST(fc, oks1.flex_parser0_steering_ok);
+               fc->tag_set = &hws_definer_generic_set;
+               break;
+       case 1:
+               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK];
+               HWS_CALC_HDR_DST(fc, oks1.flex_parser1_steering_ok);
+               fc->tag_set = &hws_definer_generic_set;
+               break;
+       case 2:
+               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK];
+               HWS_CALC_HDR_DST(fc, oks1.flex_parser2_steering_ok);
+               fc->tag_set = &hws_definer_generic_set;
+               break;
+       case 3:
+               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK];
+               HWS_CALC_HDR_DST(fc, oks1.flex_parser3_steering_ok);
+               fc->tag_set = &hws_definer_generic_set;
+               break;
+       case 4:
+               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK];
+               HWS_CALC_HDR_DST(fc, oks1.flex_parser4_steering_ok);
+               fc->tag_set = &hws_definer_generic_set;
+               break;
+       case 5:
+               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK];
+               HWS_CALC_HDR_DST(fc, oks1.flex_parser5_steering_ok);
+               fc->tag_set = &hws_definer_generic_set;
+               break;
+       case 6:
+               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK];
+               HWS_CALC_HDR_DST(fc, oks1.flex_parser6_steering_ok);
+               fc->tag_set = &hws_definer_generic_set;
+               break;
+       case 7:
+               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK];
+               HWS_CALC_HDR_DST(fc, oks1.flex_parser7_steering_ok);
+               fc->tag_set = &hws_definer_generic_set;
+               break;
+       default:
+               mlx5hws_err(cd->ctx, "Unsupported flex parser steering ok index %u\n", parser_id);
+               return NULL;
+       }
+
+       return fc;
+}
+
+/* Return the field-copy entry for flex parser N's sampled data, binding
+ * its definer destination to flex_parser.flex_parser_N. Returns NULL for
+ * parser ids outside 0..7. As with the steering-ok handler above, each
+ * case differs only in macro-pasted tokens.
+ */
+static struct mlx5hws_definer_fc *
+hws_definer_flex_parser_handler(struct mlx5hws_definer_conv_data *cd,
+                               u8 parser_id)
+{
+       struct mlx5hws_definer_fc *fc;
+
+       switch (parser_id) {
+       case 0:
+               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0];
+               HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_0);
+               fc->tag_set = &hws_definer_generic_set;
+               break;
+       case 1:
+               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1];
+               HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_1);
+               fc->tag_set = &hws_definer_generic_set;
+               break;
+       case 2:
+               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2];
+               HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_2);
+               fc->tag_set = &hws_definer_generic_set;
+               break;
+       case 3:
+               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3];
+               HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_3);
+               fc->tag_set = &hws_definer_generic_set;
+               break;
+       case 4:
+               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4];
+               HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_4);
+               fc->tag_set = &hws_definer_generic_set;
+               break;
+       case 5:
+               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5];
+               HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_5);
+               fc->tag_set = &hws_definer_generic_set;
+               break;
+       case 6:
+               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6];
+               HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_6);
+               fc->tag_set = &hws_definer_generic_set;
+               break;
+       case 7:
+               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7];
+               HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_7);
+               fc->tag_set = &hws_definer_generic_set;
+               break;
+       default:
+               mlx5hws_err(cd->ctx, "Unsupported flex parser %u\n", parser_id);
+               return NULL;
+       }
+
+       return fc;
+}
+
+/* Validate and claim a flex parser for one misc4 prog_sample id/value
+ * pair, then return that parser's field-copy entry.
+ * Returns NULL for an out-of-range or already-claimed parser id.
+ */
+static struct mlx5hws_definer_fc *
+hws_definer_misc4_fields_handler(struct mlx5hws_definer_conv_data *cd,
+                                bool *parser_is_used,
+                                u32 id,
+                                u32 value)
+{
+       if (id || value) {
+               if (id >= HWS_NUM_OF_FLEX_PARSERS) {
+                       mlx5hws_err(cd->ctx, "Unsupported parser id\n");
+                       return NULL;
+               }
+
+               if (parser_is_used[id]) {
+                       mlx5hws_err(cd->ctx, "Parser id have been used\n");
+                       return NULL;
+               }
+       }
+
+       /* NOTE(review): this executes even when id == 0 && value == 0
+        * (i.e. the pair is not used), marking parser 0 as claimed and
+        * creating its field entry. It looks like it belongs inside the
+        * 'if (id || value)' block - confirm intended behavior against
+        * the misc4 callers before changing.
+        */
+       parser_is_used[id] = true;
+
+       return hws_definer_flex_parser_handler(cd, id);
+}
+
+/* Reject match-flag combinations the definer cannot express: within each
+ * group below at most one flag may be set, since the flags in a group
+ * compete for the same definer resources.
+ * Returns 0 when the combination is valid, -EINVAL otherwise.
+ */
+static int
+hws_definer_check_match_flags(struct mlx5hws_definer_conv_data *cd)
+{
+       static const u32 exclusive_groups[] = {
+               MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE |
+               MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE |
+               MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU |
+               MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
+               MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN |
+               MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1,
+
+               MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY |
+               MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2,
+
+               MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE |
+               MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP,
+
+               MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 |
+               MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 |
+               MLX5HWS_DEFINER_MATCH_FLAG_TCP_O |
+               MLX5HWS_DEFINER_MATCH_FLAG_TCP_I,
+       };
+       u32 group_flags;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(exclusive_groups); i++) {
+               group_flags = cd->match_flags & exclusive_groups[i];
+               /* More than one bit set within a group is a conflict */
+               if (group_flags & (group_flags - 1)) {
+                       mlx5hws_err(cd->ctx, "Invalid definer fields combination\n");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+/* Convert the outer_headers section of the FTE match param into definer
+ * field-copy entries (outer L2 / VLAN / L3 / L4 fields).
+ * Returns 0 on success, -EINVAL when fields HW cannot match are set.
+ */
+static int
+hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
+                      u32 *match_param)
+{
+       bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
+       struct mlx5hws_definer_fc *fc = cd->fc;
+       struct mlx5hws_definer_fc *curr_fc;
+       u32 *s_ipv6, *d_ipv6;
+
+       /* Reject reserved / unsupported outer-header match bits up front */
+       if (HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type, 0x2) ||
+           HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c2, 0xe) ||
+           HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c4, 0x4)) {
+               mlx5hws_err(cd->ctx, "Unsupported outer parameters set\n");
+               return -EINVAL;
+       }
+
+       /* L2 Check ethertype */
+       HWS_SET_HDR(fc, match_param, ETH_TYPE_O,
+                   outer_headers.ethertype,
+                   eth_l2_outer.l3_ethertype);
+       /* L2 Check SMAC 47_16 */
+       HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_O,
+                   outer_headers.smac_47_16, eth_l2_src_outer.smac_47_16);
+       /* L2 Check SMAC 15_0 */
+       HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_O,
+                   outer_headers.smac_15_0, eth_l2_src_outer.smac_15_0);
+       /* L2 Check DMAC 47_16 */
+       HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_O,
+                   outer_headers.dmac_47_16, eth_l2_outer.dmac_47_16);
+       /* L2 Check DMAC 15_0 */
+       HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_O,
+                   outer_headers.dmac_15_0, eth_l2_outer.dmac_15_0);
+
+       /* L2 VLAN */
+       HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_O,
+                   outer_headers.first_prio, eth_l2_outer.first_priority);
+       HWS_SET_HDR(fc, match_param, VLAN_CFI_O,
+                   outer_headers.first_cfi, eth_l2_outer.first_cfi);
+       HWS_SET_HDR(fc, match_param, VLAN_ID_O,
+                   outer_headers.first_vid, eth_l2_outer.first_vlan_id);
+
+       /* L2 CVLAN and SVLAN: cvlan_tag/svlan_tag are boolean selectors, so
+        * they share one qualifier field with a dedicated tag_set callback
+        * instead of a direct header copy.
+        */
+       if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag) ||
+           HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag)) {
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O];
+               HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.first_vlan_qualifier);
+               curr_fc->tag_set = &hws_definer_outer_vlan_type_set;
+               curr_fc->tag_mask_set = &hws_definer_ones_set;
+       }
+
+       /* L3 Check IP header */
+       HWS_SET_HDR(fc, match_param, IP_PROTOCOL_O,
+                   outer_headers.ip_protocol,
+                   eth_l3_outer.protocol_next_header);
+       HWS_SET_HDR(fc, match_param, IP_TTL_O,
+                   outer_headers.ttl_hoplimit,
+                   eth_l3_outer.time_to_live_hop_limit);
+
+       /* L3 Check IPv4/IPv6 addresses */
+       s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+                             outer_headers.src_ipv4_src_ipv6.ipv6_layout);
+       d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+                             outer_headers.dst_ipv4_dst_ipv6.ipv6_layout);
+
+       /* Assume IPv6 is used if ipv6 bits are set (i.e. any of the three
+        * dwords above the low 32 bits, which IPv4 also occupies, are set)
+        */
+       is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
+       is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+
+       if (is_s_ipv6) {
+               /* Handle IPv6 source address */
+               HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_O,
+                           outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
+                           ipv6_src_outer.ipv6_address_127_96);
+               HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_O,
+                           outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
+                           ipv6_src_outer.ipv6_address_95_64);
+               HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_O,
+                           outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
+                           ipv6_src_outer.ipv6_address_63_32);
+               HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_O,
+                           outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+                           ipv6_src_outer.ipv6_address_31_0);
+       } else {
+               /* Handle IPv4 source address */
+               HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
+                           outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+                           ipv4_src_dest_outer.source_address);
+       }
+       if (is_d_ipv6) {
+               /* Handle IPv6 destination address */
+               HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_O,
+                           outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
+                           ipv6_dst_outer.ipv6_address_127_96);
+               HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_O,
+                           outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
+                           ipv6_dst_outer.ipv6_address_95_64);
+               HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_O,
+                           outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
+                           ipv6_dst_outer.ipv6_address_63_32);
+               HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_O,
+                           outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+                           ipv6_dst_outer.ipv6_address_31_0);
+       } else {
+               /* Handle IPv4 destination address */
+               HWS_SET_HDR(fc, match_param, IPV4_DST_O,
+                           outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+                           ipv4_src_dest_outer.destination_address);
+       }
+
+       /* L4 Handle TCP/UDP */
+       HWS_SET_HDR(fc, match_param, L4_SPORT_O,
+                   outer_headers.tcp_sport, eth_l4_outer.source_port);
+       HWS_SET_HDR(fc, match_param, L4_DPORT_O,
+                   outer_headers.tcp_dport, eth_l4_outer.destination_port);
+       HWS_SET_HDR(fc, match_param, L4_SPORT_O,
+                   outer_headers.udp_sport, eth_l4_outer.source_port);
+       HWS_SET_HDR(fc, match_param, L4_DPORT_O,
+                   outer_headers.udp_dport, eth_l4_outer.destination_port);
+       HWS_SET_HDR(fc, match_param, TCP_FLAGS_O,
+                   outer_headers.tcp_flags, eth_l4_outer.tcp_flags);
+
+       /* L3 Handle DSCP, ECN and IHL  */
+       HWS_SET_HDR(fc, match_param, IP_DSCP_O,
+                   outer_headers.ip_dscp, eth_l3_outer.dscp);
+       HWS_SET_HDR(fc, match_param, IP_ECN_O,
+                   outer_headers.ip_ecn, eth_l3_outer.ecn);
+       HWS_SET_HDR(fc, match_param, IPV4_IHL_O,
+                   outer_headers.ipv4_ihl, eth_l3_outer.ihl);
+
+       /* Set IP fragmented bit.
+        * NOTE(review): the definer section chosen for the bit (eth_l4 vs
+        * eth_l2_src) presumably depends on which L2 sections the rest of
+        * the match already consumes - confirm against the definer layout.
+        */
+       if (HWS_IS_FLD_SET(match_param, outer_headers.frag)) {
+               smac_set = HWS_IS_FLD_SET(match_param, outer_headers.smac_15_0) ||
+                               HWS_IS_FLD_SET(match_param, outer_headers.smac_47_16);
+               dmac_set = HWS_IS_FLD_SET(match_param, outer_headers.dmac_15_0) ||
+                               HWS_IS_FLD_SET(match_param, outer_headers.dmac_47_16);
+               if (smac_set == dmac_set) {
+                       HWS_SET_HDR(fc, match_param, IP_FRAG_O,
+                                   outer_headers.frag, eth_l4_outer.ip_fragmented);
+               } else {
+                       HWS_SET_HDR(fc, match_param, IP_FRAG_O,
+                                   outer_headers.frag, eth_l2_src_outer.ip_fragmented);
+               }
+       }
+
+       /* L3_type set: ip_version needs translation to the definer's l3_type
+        * encoding, hence the dedicated tag_set callback.
+        */
+       if (HWS_IS_FLD_SET(match_param, outer_headers.ip_version)) {
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O];
+               HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.l3_type);
+               curr_fc->tag_set = &hws_definer_l3_type_set;
+               curr_fc->tag_mask_set = &hws_definer_ones_set;
+               HWS_CALC_HDR_SRC(curr_fc, outer_headers.ip_version);
+       }
+
+       return 0;
+}
+
+/* Convert the inner_headers section of the FTE match param into definer
+ * field-copy entries (inner L2 / VLAN / L3 / L4 fields); mirrors
+ * hws_definer_conv_outer for the inner (post-decap) headers.
+ * Returns 0 on success, -EINVAL when fields HW cannot match are set.
+ */
+static int
+hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
+                      u32 *match_param)
+{
+       bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
+       struct mlx5hws_definer_fc *fc = cd->fc;
+       struct mlx5hws_definer_fc *curr_fc;
+       u32 *s_ipv6, *d_ipv6;
+
+       /* Reject reserved / unsupported inner-header match bits up front */
+       if (HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type, 0x2) ||
+           HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c2, 0xe) ||
+           HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c4, 0x4)) {
+               mlx5hws_err(cd->ctx, "Unsupported inner parameters set\n");
+               return -EINVAL;
+       }
+
+       /* L2 Check ethertype */
+       HWS_SET_HDR(fc, match_param, ETH_TYPE_I,
+                   inner_headers.ethertype,
+                   eth_l2_inner.l3_ethertype);
+       /* L2 Check SMAC 47_16 */
+       HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_I,
+                   inner_headers.smac_47_16, eth_l2_src_inner.smac_47_16);
+       /* L2 Check SMAC 15_0 */
+       HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_I,
+                   inner_headers.smac_15_0, eth_l2_src_inner.smac_15_0);
+       /* L2 Check DMAC 47_16 */
+       HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_I,
+                   inner_headers.dmac_47_16, eth_l2_inner.dmac_47_16);
+       /* L2 Check DMAC 15_0 */
+       HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_I,
+                   inner_headers.dmac_15_0, eth_l2_inner.dmac_15_0);
+
+       /* L2 VLAN */
+       HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_I,
+                   inner_headers.first_prio, eth_l2_inner.first_priority);
+       HWS_SET_HDR(fc, match_param, VLAN_CFI_I,
+                   inner_headers.first_cfi, eth_l2_inner.first_cfi);
+       HWS_SET_HDR(fc, match_param, VLAN_ID_I,
+                   inner_headers.first_vid, eth_l2_inner.first_vlan_id);
+
+       /* L2 CVLAN and SVLAN: boolean selectors share one qualifier field
+        * with a dedicated tag_set callback, as in the outer conversion.
+        */
+       if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag) ||
+           HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag)) {
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I];
+               HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.first_vlan_qualifier);
+               curr_fc->tag_set = &hws_definer_inner_vlan_type_set;
+               curr_fc->tag_mask_set = &hws_definer_ones_set;
+       }
+       /* L3 Check IP header.
+        * NOTE(review): IP_VERSION_I is copied directly here while the outer
+        * conversion has no direct IP_VERSION_O copy - confirm the asymmetry
+        * is intentional.
+        */
+       HWS_SET_HDR(fc, match_param, IP_PROTOCOL_I,
+                   inner_headers.ip_protocol,
+                   eth_l3_inner.protocol_next_header);
+       HWS_SET_HDR(fc, match_param, IP_VERSION_I,
+                   inner_headers.ip_version,
+                   eth_l3_inner.ip_version);
+       HWS_SET_HDR(fc, match_param, IP_TTL_I,
+                   inner_headers.ttl_hoplimit,
+                   eth_l3_inner.time_to_live_hop_limit);
+
+       /* L3 Check IPv4/IPv6 addresses */
+       s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+                             inner_headers.src_ipv4_src_ipv6.ipv6_layout);
+       d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+                             inner_headers.dst_ipv4_dst_ipv6.ipv6_layout);
+
+       /* Assume IPv6 is used if ipv6 bits are set (any dword above the low
+        * 32 bits that IPv4 also occupies)
+        */
+       is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
+       is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+
+       if (is_s_ipv6) {
+               /* Handle IPv6 source address */
+               HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_I,
+                           inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
+                           ipv6_src_inner.ipv6_address_127_96);
+               HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_I,
+                           inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
+                           ipv6_src_inner.ipv6_address_95_64);
+               HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_I,
+                           inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
+                           ipv6_src_inner.ipv6_address_63_32);
+               HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_I,
+                           inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+                           ipv6_src_inner.ipv6_address_31_0);
+       } else {
+               /* Handle IPv4 source address */
+               HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
+                           inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+                           ipv4_src_dest_inner.source_address);
+       }
+       if (is_d_ipv6) {
+               /* Handle IPv6 destination address */
+               HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_I,
+                           inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
+                           ipv6_dst_inner.ipv6_address_127_96);
+               HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_I,
+                           inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
+                           ipv6_dst_inner.ipv6_address_95_64);
+               HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_I,
+                           inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
+                           ipv6_dst_inner.ipv6_address_63_32);
+               HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_I,
+                           inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+                           ipv6_dst_inner.ipv6_address_31_0);
+       } else {
+               /* Handle IPv4 destination address */
+               HWS_SET_HDR(fc, match_param, IPV4_DST_I,
+                           inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+                           ipv4_src_dest_inner.destination_address);
+       }
+
+       /* L4 Handle TCP/UDP */
+       HWS_SET_HDR(fc, match_param, L4_SPORT_I,
+                   inner_headers.tcp_sport, eth_l4_inner.source_port);
+       HWS_SET_HDR(fc, match_param, L4_DPORT_I,
+                   inner_headers.tcp_dport, eth_l4_inner.destination_port);
+       HWS_SET_HDR(fc, match_param, L4_SPORT_I,
+                   inner_headers.udp_sport, eth_l4_inner.source_port);
+       HWS_SET_HDR(fc, match_param, L4_DPORT_I,
+                   inner_headers.udp_dport, eth_l4_inner.destination_port);
+       HWS_SET_HDR(fc, match_param, TCP_FLAGS_I,
+                   inner_headers.tcp_flags, eth_l4_inner.tcp_flags);
+
+       /* L3 Handle DSCP, ECN and IHL  */
+       HWS_SET_HDR(fc, match_param, IP_DSCP_I,
+                   inner_headers.ip_dscp, eth_l3_inner.dscp);
+       HWS_SET_HDR(fc, match_param, IP_ECN_I,
+                   inner_headers.ip_ecn, eth_l3_inner.ecn);
+       HWS_SET_HDR(fc, match_param, IPV4_IHL_I,
+                   inner_headers.ipv4_ihl, eth_l3_inner.ihl);
+
+       /* Set IP fragmented bit.
+        * NOTE(review): the section chosen for the bit (eth_l2_inner when a
+        * VXLAN VNI is matched, else eth_l4/eth_l2_src as in the outer path)
+        * presumably reflects which definer sections the match consumes -
+        * confirm against the definer layout.
+        */
+       if (HWS_IS_FLD_SET(match_param, inner_headers.frag)) {
+               if (HWS_IS_FLD_SET(match_param, misc_parameters.vxlan_vni)) {
+                       HWS_SET_HDR(fc, match_param, IP_FRAG_I,
+                                   inner_headers.frag, eth_l2_inner.ip_fragmented);
+               } else {
+                       smac_set = HWS_IS_FLD_SET(match_param, inner_headers.smac_15_0) ||
+                                  HWS_IS_FLD_SET(match_param, inner_headers.smac_47_16);
+                       dmac_set = HWS_IS_FLD_SET(match_param, inner_headers.dmac_15_0) ||
+                                  HWS_IS_FLD_SET(match_param, inner_headers.dmac_47_16);
+                       if (smac_set == dmac_set) {
+                               HWS_SET_HDR(fc, match_param, IP_FRAG_I,
+                                           inner_headers.frag, eth_l4_inner.ip_fragmented);
+                       } else {
+                               HWS_SET_HDR(fc, match_param, IP_FRAG_I,
+                                           inner_headers.frag, eth_l2_src_inner.ip_fragmented);
+                       }
+               }
+       }
+
+       /* L3_type set: ip_version needs translation to the definer's l3_type
+        * encoding, hence the dedicated tag_set callback.
+        */
+       if (HWS_IS_FLD_SET(match_param, inner_headers.ip_version)) {
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I];
+               HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.l3_type);
+               curr_fc->tag_set = &hws_definer_l3_type_set;
+               curr_fc->tag_mask_set = &hws_definer_ones_set;
+               HWS_CALC_HDR_SRC(curr_fc, inner_headers.ip_version);
+       }
+
+       return 0;
+}
+
+/* Convert the misc_parameters section of the FTE match param into definer
+ * field-copy entries: GRE/GENEVE/VXLAN tunnel fields, second VLANs, IPv6
+ * flow labels, source QP and source port/GVMI.
+ * Returns 0 on success, -EINVAL for unsupported fields, -EOPNOTSUPP when
+ * a required device capability is missing.
+ */
+static int
+hws_definer_conv_misc(struct mlx5hws_definer_conv_data *cd,
+                     u32 *match_param)
+{
+       struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
+       struct mlx5hws_definer_fc *fc = cd->fc;
+       struct mlx5hws_definer_fc *curr_fc;
+
+       /* Reject reserved / unsupported misc match bits up front */
+       if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1, 0x1) ||
+           HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_64, 0xc) ||
+           HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_d8, 0x6) ||
+           HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_e0, 0xc) ||
+           HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_100, 0xc) ||
+           HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_120, 0xa) ||
+           HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_140, 0x8) ||
+           HWS_IS_FLD_SET(match_param, misc_parameters.bth_dst_qp) ||
+           HWS_IS_FLD_SET(match_param, misc_parameters.bth_opcode) ||
+           HWS_IS_FLD_SET(match_param, misc_parameters.inner_esp_spi) ||
+           HWS_IS_FLD_SET(match_param, misc_parameters.outer_esp_spi) ||
+           HWS_IS_FLD_SET(match_param, misc_parameters.source_vhca_port) ||
+           HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1a0, 0x60)) {
+               mlx5hws_err(cd->ctx, "Unsupported misc parameters set\n");
+               return -EINVAL;
+       }
+
+       /* Check GRE related fields: each GRE flag/field lives inside the
+        * tunnel_header dwords, so bit_mask/bit_off are narrowed per field.
+        */
+       if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_c_present)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_C];
+               HWS_CALC_HDR(curr_fc,
+                            misc_parameters.gre_c_present,
+                            tunnel_header.tunnel_header_0);
+               curr_fc->bit_mask = __mlx5_mask(header_gre, gre_c_present);
+               curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_c_present);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_k_present)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_K];
+               HWS_CALC_HDR(curr_fc,
+                            misc_parameters.gre_k_present,
+                            tunnel_header.tunnel_header_0);
+               curr_fc->bit_mask = __mlx5_mask(header_gre, gre_k_present);
+               curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_k_present);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_s_present)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_S];
+               HWS_CALC_HDR(curr_fc,
+                            misc_parameters.gre_s_present,
+                            tunnel_header.tunnel_header_0);
+               curr_fc->bit_mask = __mlx5_mask(header_gre, gre_s_present);
+               curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_s_present);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_protocol)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL];
+               HWS_CALC_HDR(curr_fc,
+                            misc_parameters.gre_protocol,
+                            tunnel_header.tunnel_header_0);
+               curr_fc->bit_mask = __mlx5_mask(header_gre, gre_protocol);
+               curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_key.key)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
+                                  MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY;
+               HWS_SET_HDR(fc, match_param, GRE_OPT_KEY,
+                           misc_parameters.gre_key.key, tunnel_header.tunnel_header_2);
+       }
+
+       /* Check GENEVE related fields */
+       if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_vni)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_VNI];
+               HWS_CALC_HDR(curr_fc,
+                            misc_parameters.geneve_vni,
+                            tunnel_header.tunnel_header_1);
+               curr_fc->bit_mask = __mlx5_mask(header_geneve, vni);
+               curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, vni);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_opt_len)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN];
+               HWS_CALC_HDR(curr_fc,
+                            misc_parameters.geneve_opt_len,
+                            tunnel_header.tunnel_header_0);
+               curr_fc->bit_mask = __mlx5_mask(header_geneve, opt_len);
+               curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, opt_len);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_protocol_type)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_PROTO];
+               HWS_CALC_HDR(curr_fc,
+                            misc_parameters.geneve_protocol_type,
+                            tunnel_header.tunnel_header_0);
+               curr_fc->bit_mask = __mlx5_mask(header_geneve, protocol_type);
+               curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, protocol_type);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_oam)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OAM];
+               HWS_CALC_HDR(curr_fc,
+                            misc_parameters.geneve_oam,
+                            tunnel_header.tunnel_header_0);
+               curr_fc->bit_mask = __mlx5_mask(header_geneve, o_flag);
+               curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, o_flag);
+       }
+
+       HWS_SET_HDR(fc, match_param, SOURCE_QP,
+                   misc_parameters.source_sqn, source_qp_gvmi.source_qp);
+       HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_O,
+                   misc_parameters.outer_ipv6_flow_label, eth_l3_outer.flow_label);
+       HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_I,
+                   misc_parameters.inner_ipv6_flow_label, eth_l3_inner.flow_label);
+
+       /* L2 Second VLAN */
+       HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_O,
+                   misc_parameters.outer_second_prio, eth_l2_outer.second_priority);
+       HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_I,
+                   misc_parameters.inner_second_prio, eth_l2_inner.second_priority);
+       HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_O,
+                   misc_parameters.outer_second_cfi, eth_l2_outer.second_cfi);
+       HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_I,
+                   misc_parameters.inner_second_cfi, eth_l2_inner.second_cfi);
+       HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_O,
+                   misc_parameters.outer_second_vid, eth_l2_outer.second_vlan_id);
+       HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_I,
+                   misc_parameters.inner_second_vid, eth_l2_inner.second_vlan_id);
+
+       /* L2 Second CVLAN and SVLAN: boolean selectors share one qualifier
+        * field with a dedicated tag_set callback.
+        */
+       if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag) ||
+           HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag)) {
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O];
+               HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.second_vlan_qualifier);
+               curr_fc->tag_set = &hws_definer_outer_second_vlan_type_set;
+               curr_fc->tag_mask_set = &hws_definer_ones_set;
+       }
+
+       if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) ||
+           HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag)) {
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I];
+               HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.second_vlan_qualifier);
+               curr_fc->tag_set = &hws_definer_inner_second_vlan_type_set;
+               curr_fc->tag_mask_set = &hws_definer_ones_set;
+       }
+
+       /* VXLAN VNI  */
+       if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.vxlan_vni)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN;
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_VNI];
+               HWS_CALC_HDR(curr_fc, misc_parameters.vxlan_vni, tunnel_header.tunnel_header_1);
+               curr_fc->bit_mask = __mlx5_mask(header_vxlan, vni);
+               curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan, vni);
+       }
+
+       /* Flex protocol steering ok bits */
+       if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.geneve_tlv_option_0_exist)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+
+               if (!caps->flex_parser_ok_bits_supp) {
+                       mlx5hws_err(cd->ctx, "Unsupported flex_parser_ok_bits_supp capability\n");
+                       return -EOPNOTSUPP;
+               }
+
+               curr_fc = hws_definer_flex_parser_steering_ok_bits_handler(
+                               cd, caps->flex_parser_id_geneve_tlv_option_0);
+               if (!curr_fc)
+                       return -EINVAL;
+
+               /* Bind the copy source to the exist bit in the match param.
+                * This must target curr_fc - the entry returned above - not
+                * the fc array base, which would clobber entry 0's source
+                * offsets and leave curr_fc without a source binding.
+                */
+               HWS_CALC_HDR_SRC(curr_fc, misc_parameters.geneve_tlv_option_0_exist);
+       }
+
+       /* Source port/GVMI: resolved via vhca-id lookup when the match also
+        * sets source_eswitch_owner_vhca_id, local context otherwise.
+        */
+       if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port)) {
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_SOURCE_GVMI];
+               HWS_CALC_HDR_DST(curr_fc, source_qp_gvmi.source_gvmi);
+               curr_fc->tag_mask_set = &hws_definer_ones_set;
+               curr_fc->tag_set = HWS_IS_FLD_SET(match_param,
+                                                 misc_parameters.source_eswitch_owner_vhca_id) ?
+                                                 &hws_definer_set_source_gvmi_vhca_id :
+                                                 &hws_definer_set_source_gvmi;
+       } else {
+               if (HWS_IS_FLD_SET(match_param, misc_parameters.source_eswitch_owner_vhca_id)) {
+                       mlx5hws_err(cd->ctx,
+                                   "Unsupported source_eswitch_owner_vhca_id field usage\n");
+                       return -EOPNOTSUPP;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * hws_definer_conv_misc2() - Convert misc_parameters_2 match fields into
+ * definer field-copy (fc) entries.
+ *
+ * Handles MPLS labels, metadata registers c0-c7 / reg_a, and first-MPLS-
+ * over-GRE/UDP tunnel matching, the latter gated on the corresponding flex
+ * parser capability.
+ *
+ * Return: 0 on success, -EINVAL when reserved/unexpressible fields are set
+ * or a flex parser entry cannot be obtained, -EOPNOTSUPP when a required
+ * flex parser capability is missing.
+ */
+static int
+hws_definer_conv_misc2(struct mlx5hws_definer_conv_data *cd,
+                      u32 *match_param)
+{
+       struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
+       struct mlx5hws_definer_fc *fc = cd->fc;
+       struct mlx5hws_definer_fc *curr_fc;
+
+       /* Reserved areas and the macsec/ipsec syndromes cannot be matched */
+       if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1a0, 0x8) ||
+           HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1b8, 0x8) ||
+           HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1c0, 0x40) ||
+           HWS_IS_FLD_SET(match_param, misc_parameters_2.macsec_syndrome) ||
+           HWS_IS_FLD_SET(match_param, misc_parameters_2.ipsec_syndrome)) {
+               mlx5hws_err(cd->ctx, "Unsupported misc2 parameters set\n")
+               return -EINVAL;
+       }
+
+       /* Straight copies: each entry is configured only if its field is used */
+       HWS_SET_HDR(fc, match_param, MPLS0_O,
+                   misc_parameters_2.outer_first_mpls, mpls_outer.mpls0_label);
+       HWS_SET_HDR(fc, match_param, MPLS0_I,
+                   misc_parameters_2.inner_first_mpls, mpls_inner.mpls0_label);
+       HWS_SET_HDR(fc, match_param, REG_0,
+                   misc_parameters_2.metadata_reg_c_0, registers.register_c_0);
+       HWS_SET_HDR(fc, match_param, REG_1,
+                   misc_parameters_2.metadata_reg_c_1, registers.register_c_1);
+       HWS_SET_HDR(fc, match_param, REG_2,
+                   misc_parameters_2.metadata_reg_c_2, registers.register_c_2);
+       HWS_SET_HDR(fc, match_param, REG_3,
+                   misc_parameters_2.metadata_reg_c_3, registers.register_c_3);
+       HWS_SET_HDR(fc, match_param, REG_4,
+                   misc_parameters_2.metadata_reg_c_4, registers.register_c_4);
+       HWS_SET_HDR(fc, match_param, REG_5,
+                   misc_parameters_2.metadata_reg_c_5, registers.register_c_5);
+       HWS_SET_HDR(fc, match_param, REG_6,
+                   misc_parameters_2.metadata_reg_c_6, registers.register_c_6);
+       HWS_SET_HDR(fc, match_param, REG_7,
+                   misc_parameters_2.metadata_reg_c_7, registers.register_c_7);
+       HWS_SET_HDR(fc, match_param, REG_A,
+                   misc_parameters_2.metadata_reg_a, metadata.general_purpose);
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_gre)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE;
+
+               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)) {
+                       mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over gre parameters set\n");
+                       return -EOPNOTSUPP;
+               }
+
+               curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_gre);
+               if (!curr_fc)
+                       return -EINVAL;
+
+               /* NOTE(review): HWS_CALC_HDR_SRC() is passed the fc array base
+                * rather than the curr_fc entry obtained above - verify that
+                * this is what the macro expects.
+                */
+               HWS_CALC_HDR_SRC(fc, misc_parameters_2.outer_first_mpls_over_gre);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_udp)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP;
+
+               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)) {
+                       mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over udp parameters set\n");
+                       return -EOPNOTSUPP;
+               }
+
+               curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_udp);
+               if (!curr_fc)
+                       return -EINVAL;
+
+               /* NOTE(review): same fc-vs-curr_fc question as the GRE case above */
+               HWS_CALC_HDR_SRC(fc, misc_parameters_2.outer_first_mpls_over_udp);
+       }
+
+       return 0;
+}
+
+/*
+ * hws_definer_conv_misc3() - Convert misc_parameters_3 match fields into
+ * definer field-copy (fc) entries.
+ *
+ * Covers inner/outer TCP seq/ack numbers, VXLAN-GPE, ICMPv4/v6, GENEVE TLV
+ * option data and the GTPU fields; each tunnel/ICMP group is gated on the
+ * relevant flex parser capability bit.
+ *
+ * Return: 0 on success, -EINVAL on reserved fields set or flex parser
+ * allocation failure, -EOPNOTSUPP when a required capability is missing.
+ */
+static int
+hws_definer_conv_misc3(struct mlx5hws_definer_conv_data *cd, u32 *match_param)
+{
+       struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
+       struct mlx5hws_definer_fc *fc = cd->fc;
+       struct mlx5hws_definer_fc *curr_fc;
+       bool vxlan_gpe_flex_parser_enabled;
+
+       /* Check reserved and unsupported fields */
+       if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_80, 0x8) ||
+           HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_b0, 0x10) ||
+           HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_170, 0x10) ||
+           HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_1e0, 0x20)) {
+               mlx5hws_err(cd->ctx, "Unsupported misc3 parameters set\n");
+               return -EINVAL;
+       }
+
+       /* NOTE(review): inner and outer TCP reuse the same TCP_SEQ_NUM /
+        * TCP_ACK_NUM fc entries; this relies on the conflicting-flags check
+        * (TCP_I vs TCP_O) rejecting the combination - confirm.
+        */
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_seq_num) ||
+           HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_ack_num)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_I;
+               HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
+                           misc_parameters_3.inner_tcp_seq_num, tcp_icmp.tcp_seq);
+               HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
+                           misc_parameters_3.inner_tcp_ack_num, tcp_icmp.tcp_ack);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_seq_num) ||
+           HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_ack_num)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_O;
+               HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
+                           misc_parameters_3.outer_tcp_seq_num, tcp_icmp.tcp_seq);
+               HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
+                           misc_parameters_3.outer_tcp_ack_num, tcp_icmp.tcp_ack);
+       }
+
+       vxlan_gpe_flex_parser_enabled = caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_vni)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
+
+               if (!vxlan_gpe_flex_parser_enabled) {
+                       mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
+                       return -EOPNOTSUPP;
+               }
+
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI];
+               HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_vni,
+                            tunnel_header.tunnel_header_1);
+               curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, vni);
+               curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, vni);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_next_protocol)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
+
+               if (!vxlan_gpe_flex_parser_enabled) {
+                       mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
+                       return -EOPNOTSUPP;
+               }
+
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO];
+               HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_next_protocol,
+                            tunnel_header.tunnel_header_0);
+               curr_fc->byte_off += MLX5_BYTE_OFF(header_vxlan_gpe, protocol);
+               curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, protocol);
+               curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, protocol);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_flags)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
+
+               if (!vxlan_gpe_flex_parser_enabled) {
+                       mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
+                       return -EOPNOTSUPP;
+               }
+
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS];
+               HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_flags,
+                            tunnel_header.tunnel_header_0);
+               curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, flags);
+               curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, flags);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_header_data) ||
+           HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
+           HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4;
+
+               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED)) {
+                       mlx5hws_err(cd->ctx, "Unsupported ICMPv4 flex parser\n");
+                       return -EOPNOTSUPP;
+               }
+
+               HWS_SET_HDR(fc, match_param, ICMP_DW3,
+                           misc_parameters_3.icmp_header_data, tcp_icmp.icmp_dw3);
+
+               /* type and code live in the same ICMP DW1; one setter packs both */
+               if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
+                   HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
+                       curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
+                       HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
+                       curr_fc->tag_set = &hws_definer_icmp_dw1_set;
+               }
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_header_data) ||
+           HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
+           HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6;
+
+               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED)) {
+                       mlx5hws_err(cd->ctx, "Unsupported ICMPv6 parser\n");
+                       return -EOPNOTSUPP;
+               }
+
+               HWS_SET_HDR(fc, match_param, ICMP_DW3,
+                           misc_parameters_3.icmpv6_header_data, tcp_icmp.icmp_dw3);
+
+               if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
+                   HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
+                       curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
+                       HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
+                       curr_fc->tag_set = &hws_definer_icmpv6_dw1_set;
+               }
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.geneve_tlv_option_0_data)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+
+               curr_fc =
+                       hws_definer_flex_parser_handler(cd,
+                                                       caps->flex_parser_id_geneve_tlv_option_0);
+               if (!curr_fc)
+                       return -EINVAL;
+
+               /* NOTE(review): HWS_CALC_HDR_SRC() gets the fc array base, not
+                * the curr_fc entry returned above - verify against the macro.
+                */
+               HWS_CALC_HDR_SRC(fc, misc_parameters_3.geneve_tlv_option_0_data);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_teid)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)) {
+                       mlx5hws_err(cd->ctx, "Unsupported GTPU TEID flex parser\n");
+                       return -EOPNOTSUPP;
+               }
+
+               /* NOTE(review): this repoints the local 'fc' away from the
+                * array base; the '&fc[...]' lookups in the GTPU_DW2 /
+                * FIRST_EXT_DW0 / GTPU_DW0 blocks below would then index
+                * relative to this entry instead of cd->fc - looks fragile,
+                * verify against the fname enum layout.
+                */
+               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_TEID];
+               fc->tag_set = &hws_definer_generic_set;
+               fc->bit_mask = __mlx5_mask(header_gtp, teid);
+               fc->byte_off = caps->format_select_gtpu_dw_1 * DW_SIZE;
+               HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_teid);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_type)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
+                       mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
+                       return -EOPNOTSUPP;
+               }
+
+               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
+               fc->tag_set = &hws_definer_generic_set;
+               fc->bit_mask = __mlx5_mask(header_gtp, msg_type);
+               fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_type);
+               fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
+               HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_msg_type);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_flags)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
+                       mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
+                       return -EOPNOTSUPP;
+               }
+
+               /* NOTE(review): reuses the GTP_MSG_TYPE entry rather than a
+                * dedicated MSG_FLAGS one; matching both msg_type and
+                * msg_flags would overwrite each other's setter - confirm
+                * this is intended.
+                */
+               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
+               fc->tag_set = &hws_definer_generic_set;
+               fc->bit_mask = __mlx5_mask(header_gtp, msg_flags);
+               fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_flags);
+               fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
+               HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_msg_flags);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_2)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)) {
+                       mlx5hws_err(cd->ctx, "Unsupported GTPU DW2 flex parser\n");
+                       return -EOPNOTSUPP;
+               }
+
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW2];
+               curr_fc->tag_set = &hws_definer_generic_set;
+               curr_fc->bit_mask = -1;
+               curr_fc->byte_off = caps->format_select_gtpu_dw_2 * DW_SIZE;
+               HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_dw_2);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_first_ext_dw_0)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)) {
+                       mlx5hws_err(cd->ctx, "Unsupported GTPU first EXT DW0 flex parser\n");
+                       return -EOPNOTSUPP;
+               }
+
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0];
+               curr_fc->tag_set = &hws_definer_generic_set;
+               curr_fc->bit_mask = -1;
+               curr_fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
+               HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_first_ext_dw_0);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_0)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)) {
+                       mlx5hws_err(cd->ctx, "Unsupported GTPU DW0 flex parser\n");
+                       return -EOPNOTSUPP;
+               }
+
+               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW0];
+               curr_fc->tag_set = &hws_definer_generic_set;
+               curr_fc->bit_mask = -1;
+               curr_fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
+               HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_dw_0);
+       }
+
+       return 0;
+}
+
+/*
+ * hws_definer_conv_misc4() - Convert misc_parameters_4 programmable sample
+ * fields into flex-parser field-copy entries.
+ *
+ * Each of the four id/value pairs is mapped through
+ * hws_definer_misc4_fields_handler(); @parser_is_used presumably tracks
+ * which flex parsers have already been claimed so two pairs cannot share
+ * one - confirm against the handler.
+ *
+ * Return: 0 on success, -EINVAL on reserved bits set or when a pair cannot
+ * be mapped to a parser.
+ */
+static int
+hws_definer_conv_misc4(struct mlx5hws_definer_conv_data *cd,
+                      u32 *match_param)
+{
+       bool parser_is_used[HWS_NUM_OF_FLEX_PARSERS] = {};
+       struct mlx5hws_definer_fc *fc;
+       u32 id, value;
+
+       if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_4.reserved_at_100, 0x100)) {
+               mlx5hws_err(cd->ctx, "Unsupported misc4 parameters set\n");
+               return -EINVAL;
+       }
+
+       /* Pair 0 */
+       id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_0);
+       value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_0);
+       fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+       if (!fc)
+               return -EINVAL;
+
+       HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_0);
+
+       /* Pair 1 */
+       id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_1);
+       value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_1);
+       fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+       if (!fc)
+               return -EINVAL;
+
+       HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_1);
+
+       /* Pair 2 */
+       id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_2);
+       value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_2);
+       fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+       if (!fc)
+               return -EINVAL;
+
+       HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_2);
+
+       /* Pair 3 */
+       id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_3);
+       value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_3);
+       fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+       if (!fc)
+               return -EINVAL;
+
+       HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_3);
+
+       return 0;
+}
+
+/*
+ * hws_definer_conv_misc5() - Convert misc_parameters_5 tunnel header words
+ * into definer field-copy entries.
+ *
+ * tunnel_header_0/1 share the TNL_HEADER_0_1 match flag and tunnel_header_2
+ * has its own flag; macsec tags and the reserved area are rejected.
+ *
+ * Return: 0 on success, -EINVAL on unsupported fields set.
+ */
+static int
+hws_definer_conv_misc5(struct mlx5hws_definer_conv_data *cd,
+                      u32 *match_param)
+{
+       struct mlx5hws_definer_fc *fc = cd->fc;
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_0) ||
+           HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_1) ||
+           HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_2) ||
+           HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_3) ||
+           HWS_IS_FLD_SET_SZ(match_param, misc_parameters_5.reserved_at_100, 0x100)) {
+               mlx5hws_err(cd->ctx, "Unsupported misc5 parameters set\n");
+               return -EINVAL;
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_0)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
+               HWS_SET_HDR(fc, match_param, TNL_HDR_0,
+                           misc_parameters_5.tunnel_header_0, tunnel_header.tunnel_header_0);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_1)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
+               HWS_SET_HDR(fc, match_param, TNL_HDR_1,
+                           misc_parameters_5.tunnel_header_1, tunnel_header.tunnel_header_1);
+       }
+
+       if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_2)) {
+               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2;
+               HWS_SET_HDR(fc, match_param, TNL_HDR_2,
+                           misc_parameters_5.tunnel_header_2, tunnel_header.tunnel_header_2);
+       }
+
+       /* NOTE(review): unlike headers 0-2, no match flag is set for
+        * tunnel_header_3 - confirm this asymmetry is intended.
+        */
+       HWS_SET_HDR(fc, match_param, TNL_HDR_3,
+                   misc_parameters_5.tunnel_header_3, tunnel_header.tunnel_header_3);
+
+       return 0;
+}
+
+/* Count the field-copy entries in @fc that are actually in use, i.e. have
+ * a tag_set callback installed. An empty matcher is represented by
+ * ZERO_SIZE_PTR and trivially has zero entries.
+ */
+static int hws_definer_get_fc_size(struct mlx5hws_definer_fc *fc)
+{
+       u32 used = 0;
+       int idx;
+
+       /* For empty matcher, ZERO_SIZE_PTR is returned */
+       if (fc == ZERO_SIZE_PTR)
+               return 0;
+
+       for (idx = 0; idx < MLX5HWS_DEFINER_FNAME_MAX; idx++) {
+               if (fc[idx].tag_set)
+                       used++;
+       }
+
+       return used;
+}
+
+/* Build a compact copy of @fc keeping only the entries whose tag_set
+ * callback is populated; each copied entry records its original index in
+ * ->fname. For an empty matcher (no used entries) kcalloc(0, ...) yields
+ * ZERO_SIZE_PTR, which is returned as-is. Returns NULL on allocation
+ * failure; the caller owns (and must kfree()) the returned array.
+ */
+static struct mlx5hws_definer_fc *
+hws_definer_alloc_compressed_fc(struct mlx5hws_definer_fc *fc)
+{
+       u32 used = hws_definer_get_fc_size(fc);
+       struct mlx5hws_definer_fc *out;
+       u32 pos = 0;
+       int i;
+
+       out = kcalloc(used, sizeof(*out), GFP_KERNEL);
+       if (!out)
+               return NULL;
+
+       /* For empty matcher, ZERO_SIZE_PTR is returned */
+       if (!used)
+               return out;
+
+       for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) {
+               if (!fc[i].tag_set)
+                       continue;
+
+               fc[i].fname = i;
+               out[pos++] = fc[i];
+       }
+
+       return out;
+}
+
+/* Mark every in-use field-copy entry in the headers-layout buffer @hl by
+ * writing its full bit_mask at the entry's byte/bit offset. A ZERO_SIZE_PTR
+ * @fc (empty matcher) leaves @hl untouched.
+ */
+static void
+hws_definer_set_hl(u8 *hl, struct mlx5hws_definer_fc *fc)
+{
+       int idx;
+
+       /* nothing to do for empty matcher */
+       if (fc == ZERO_SIZE_PTR)
+               return;
+
+       for (idx = 0; idx < MLX5HWS_DEFINER_FNAME_MAX; idx++) {
+               if (fc[idx].tag_set)
+                       HWS_SET32(hl, -1, fc[idx].byte_off, fc[idx].bit_off,
+                                 fc[idx].bit_mask);
+       }
+}
+
+/* Allocate a zeroed array of @len field-copy entries, each pre-bound to
+ * @ctx so later setters can log through it. Returns NULL on allocation
+ * failure.
+ */
+static struct mlx5hws_definer_fc *
+hws_definer_alloc_fc(struct mlx5hws_context *ctx,
+                    size_t len)
+{
+       struct mlx5hws_definer_fc *fc_arr;
+       size_t idx;
+
+       fc_arr = kcalloc(len, sizeof(*fc_arr), GFP_KERNEL);
+       if (!fc_arr)
+               return NULL;
+
+       for (idx = 0; idx < len; idx++)
+               fc_arr[idx].ctx = ctx;
+
+       return fc_arr;
+}
+
+/*
+ * hws_definer_conv_match_params_to_hl() - Convert a match template's match
+ * parameters into a headers-layout (hl) bitmap and a compressed fc array.
+ *
+ * Walks every enabled match-criteria group in a fixed order (each converter
+ * writes into the shared temporary fc array), verifies no conflicting
+ * fields were combined, stores the compressed field-copy array and its size
+ * on @mt, and marks all used bits in @hl. The temporary fc array is freed
+ * on all paths.
+ *
+ * Return: 0 on success, negative errno on unsupported fields or allocation
+ * failure; on failure no fc array is left allocated on @mt.
+ */
+static int
+hws_definer_conv_match_params_to_hl(struct mlx5hws_context *ctx,
+                                   struct mlx5hws_match_template *mt,
+                                   u8 *hl)
+{
+       struct mlx5hws_definer_conv_data cd = {0};
+       struct mlx5hws_definer_fc *fc;
+       int ret;
+
+       fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
+       if (!fc)
+               return -ENOMEM;
+
+       cd.fc = fc;
+       cd.ctx = ctx;
+
+       /* misc6 criteria cannot be expressed by a definer */
+       if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6) {
+               mlx5hws_err(ctx, "Unsupported match_criteria_enable provided\n");
+               ret = -EOPNOTSUPP;
+               goto err_free_fc;
+       }
+
+       if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
+               ret = hws_definer_conv_outer(&cd, mt->match_param);
+               if (ret)
+                       goto err_free_fc;
+       }
+
+       if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
+               ret = hws_definer_conv_inner(&cd, mt->match_param);
+               if (ret)
+                       goto err_free_fc;
+       }
+
+       if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
+               ret = hws_definer_conv_misc(&cd, mt->match_param);
+               if (ret)
+                       goto err_free_fc;
+       }
+
+       if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
+               ret = hws_definer_conv_misc2(&cd, mt->match_param);
+               if (ret)
+                       goto err_free_fc;
+       }
+
+       if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
+               ret = hws_definer_conv_misc3(&cd, mt->match_param);
+               if (ret)
+                       goto err_free_fc;
+       }
+
+       if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
+               ret = hws_definer_conv_misc4(&cd, mt->match_param);
+               if (ret)
+                       goto err_free_fc;
+       }
+
+       if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
+               ret = hws_definer_conv_misc5(&cd, mt->match_param);
+               if (ret)
+                       goto err_free_fc;
+       }
+
+       /* Check there is no conflicted fields set together */
+       ret = hws_definer_check_match_flags(&cd);
+       if (ret)
+               goto err_free_fc;
+
+       /* Allocate fc array on mt */
+       mt->fc = hws_definer_alloc_compressed_fc(fc);
+       if (!mt->fc) {
+               mlx5hws_err(ctx,
+                           "Convert match params: failed to set field copy to match template\n");
+               ret = -ENOMEM;
+               goto err_free_fc;
+       }
+       mt->fc_sz = hws_definer_get_fc_size(fc);
+
+       /* Fill in headers layout */
+       hws_definer_set_hl(hl, fc);
+
+       kfree(fc);
+       return 0;
+
+err_free_fc:
+       kfree(fc);
+       return ret;
+}
+
+/**
+ * mlx5hws_definer_conv_match_params_to_compressed_fc() - Build a compressed
+ * field-copy array directly from raw match criteria and match param.
+ * @ctx: HWS context (caps and error logging).
+ * @match_criteria_enable: bitmask of MLX5HWS_DEFINER_MATCH_CRITERIA_* groups.
+ * @match_param: match parameter buffer to convert.
+ * @fc_sz: out parameter, number of entries in the returned array; written
+ *         only on success.
+ *
+ * Unlike the match-template path, no headers layout is produced here and no
+ * conflicting-match-flags check is performed.
+ *
+ * Return: newly allocated compressed fc array (caller must kfree()), or
+ * NULL on failure.
+ */
+struct mlx5hws_definer_fc *
+mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
+                                                  u8 match_criteria_enable,
+                                                  u32 *match_param,
+                                                  int *fc_sz)
+{
+       struct mlx5hws_definer_fc *compressed_fc = NULL;
+       struct mlx5hws_definer_conv_data cd = {0};
+       struct mlx5hws_definer_fc *fc;
+       int ret;
+
+       fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
+       if (!fc)
+               return NULL;
+
+       cd.fc = fc;
+       cd.ctx = ctx;
+
+       if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
+               ret = hws_definer_conv_outer(&cd, match_param);
+               if (ret)
+                       goto err_free_fc;
+       }
+
+       if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
+               ret = hws_definer_conv_inner(&cd, match_param);
+               if (ret)
+                       goto err_free_fc;
+       }
+
+       if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
+               ret = hws_definer_conv_misc(&cd, match_param);
+               if (ret)
+                       goto err_free_fc;
+       }
+
+       if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
+               ret = hws_definer_conv_misc2(&cd, match_param);
+               if (ret)
+                       goto err_free_fc;
+       }
+
+       if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
+               ret = hws_definer_conv_misc3(&cd, match_param);
+               if (ret)
+                       goto err_free_fc;
+       }
+
+       if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
+               ret = hws_definer_conv_misc4(&cd, match_param);
+               if (ret)
+                       goto err_free_fc;
+       }
+
+       if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
+               ret = hws_definer_conv_misc5(&cd, match_param);
+               if (ret)
+                       goto err_free_fc;
+       }
+
+       /* Allocate fc array on mt */
+       compressed_fc = hws_definer_alloc_compressed_fc(fc);
+       if (!compressed_fc) {
+               mlx5hws_err(ctx,
+                           "Convert to compressed fc: failed to set field copy to match template\n");
+               goto err_free_fc;
+       }
+       *fc_sz = hws_definer_get_fc_size(fc);
+
+       /* The success path falls through: the temporary fc array is always
+        * freed and compressed_fc (NULL on failure) is returned.
+        */
+err_free_fc:
+       kfree(fc);
+       return compressed_fc;
+}
+
+/* Translate a byte offset in the headers layout into the matching byte
+ * offset inside the match tag, using the definer's DW selectors first and
+ * its byte selectors as a fallback. Returns 0 and sets *tag_byte_off on
+ * success, -EINVAL when the byte is not covered by this definer.
+ */
+static int
+hws_definer_find_byte_in_tag(struct mlx5hws_definer *definer,
+                            u32 hl_byte_off,
+                            u32 *tag_byte_off)
+{
+       int num_dws, i;
+       u8 extra_off;
+
+       /* Avoid accessing unused DW selectors */
+       num_dws = mlx5hws_definer_is_jumbo(definer) ?
+                 DW_SELECTORS : DW_SELECTORS_MATCH;
+
+       /* Add offset since each DW covers multiple BYTEs */
+       extra_off = hl_byte_off % DW_SIZE;
+       for (i = 0; i < num_dws; i++) {
+               if (definer->dw_selector[i] == hl_byte_off / DW_SIZE) {
+                       *tag_byte_off = extra_off +
+                                       DW_SIZE * (DW_SELECTORS - i - 1);
+                       return 0;
+               }
+       }
+
+       /* Add offset to skip DWs in definer */
+       extra_off = DW_SIZE * DW_SELECTORS;
+       /* Iterate in reverse since the code uses bytes from 7 -> 0 */
+       for (i = BYTE_SELECTORS - 1; i >= 0; i--) {
+               if (definer->byte_selector[i] == hl_byte_off) {
+                       *tag_byte_off = extra_off + (BYTE_SELECTORS - i - 1);
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
+}
+
+/* Rewrite each of the @fc_sz field-copy entries from headers-layout
+ * coordinates into tag coordinates for the given @definer, adjusting
+ * bit_off for the byte position shift. Returns 0 on success or the
+ * negative errno from the lookup when a field's byte is not covered by
+ * the definer selectors.
+ */
+static int
+hws_definer_fc_bind(struct mlx5hws_definer *definer,
+                   struct mlx5hws_definer_fc *fc,
+                   u32 fc_sz)
+{
+       u32 tag_off = 0;
+       int byte_diff;
+       int ret;
+       u32 i;
+
+       for (i = 0; i < fc_sz; i++, fc++) {
+               /* Map header layout byte offset to byte offset in tag */
+               ret = hws_definer_find_byte_in_tag(definer, fc->byte_off,
+                                                  &tag_off);
+               if (ret)
+                       return ret;
+
+               /* Move setter based on the location in the definer */
+               byte_diff = fc->byte_off % DW_SIZE - tag_off % DW_SIZE;
+               fc->bit_off += byte_diff * BITS_IN_BYTE;
+
+               /* Update offset in headers layout to offset in tag */
+               fc->byte_off = tag_off;
+       }
+
+       return 0;
+}
+
+/*
+ * hws_definer_best_hl_fit_recu() - Recursively assign definer selectors to
+ * the non-zero DWs of the headers layout.
+ *
+ * For each DW with bits set, try (in order) a limited DW selector, a full
+ * DW selector, and finally per-byte selectors, backtracking on failure.
+ *
+ * Return: true when a complete assignment was found (left in @ctrl), false
+ * otherwise (with @ctrl restored to its state on entry).
+ */
+static bool
+hws_definer_best_hl_fit_recu(struct mlx5hws_definer_sel_ctrl *ctrl,
+                            u32 cur_dw,
+                            u32 *data)
+{
+       u8 bytes_set;
+       int byte_idx;
+       bool ret;
+       int i;
+
+       /* Reached end, nothing left to do */
+       if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
+               return true;
+
+       /* No data set, can skip to next DW */
+       while (!*data) {
+               cur_dw++;
+               data++;
+
+               /* Reached end, nothing left to do */
+               if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
+                       return true;
+       }
+
+       /* Used all DW selectors and Byte selectors, no possible solution */
+       if (ctrl->allowed_full_dw == ctrl->used_full_dw &&
+           ctrl->allowed_lim_dw == ctrl->used_lim_dw &&
+           ctrl->allowed_bytes == ctrl->used_bytes)
+               return false;
+
+       /* Try to use limited DW selectors (they can only reach DW offsets
+        * below 64)
+        */
+       if (ctrl->allowed_lim_dw > ctrl->used_lim_dw && cur_dw < 64) {
+               ctrl->lim_dw_selector[ctrl->used_lim_dw++] = cur_dw;
+
+               ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
+               if (ret)
+                       return ret;
+
+               /* Backtrack: undo the tentative selection */
+               ctrl->lim_dw_selector[--ctrl->used_lim_dw] = 0;
+       }
+
+       /* Try to use DW selectors */
+       if (ctrl->allowed_full_dw > ctrl->used_full_dw) {
+               ctrl->full_dw_selector[ctrl->used_full_dw++] = cur_dw;
+
+               ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
+               if (ret)
+                       return ret;
+
+               /* Backtrack: undo the tentative selection */
+               ctrl->full_dw_selector[--ctrl->used_full_dw] = 0;
+       }
+
+       /* No byte selector for offset bigger than 255 */
+       if (cur_dw * DW_SIZE > 255)
+               return false;
+
+       bytes_set = !!(0x000000ff & *data) +
+                   !!(0x0000ff00 & *data) +
+                   !!(0x00ff0000 & *data) +
+                   !!(0xff000000 & *data);
+
+       /* Check if there are enough byte selectors left */
+       if (bytes_set + ctrl->used_bytes > ctrl->allowed_bytes)
+               return false;
+
+       /* Try to use Byte selectors. The be32_to_cpu() + descending mask
+        * makes 'i' walk the DW's bytes in memory order on both endiannesses;
+        * byte selectors are consumed from the top of the array downward.
+        */
+       for (i = 0; i < DW_SIZE; i++)
+               if ((0xff000000 >> (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
+                       /* Use byte selectors high to low */
+                       byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
+                       ctrl->byte_selector[byte_idx] = cur_dw * DW_SIZE + i;
+                       ctrl->used_bytes++;
+               }
+
+       ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
+       if (ret)
+               return ret;
+
+       /* Backtrack: the ascending mask walks bytes in the reverse order of
+        * the set loop above, so each decrement of used_bytes yields exactly
+        * the byte_idx that was most recently assigned.
+        * NOTE(review): '0xff << 24' overflows signed int; '0xffU' would be
+        * cleaner.
+        */
+       for (i = 0; i < DW_SIZE; i++)
+               if ((0xff << (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
+                       ctrl->used_bytes--;
+                       byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
+                       ctrl->byte_selector[byte_idx] = 0;
+               }
+
+       return false;
+}
+
+/* Copy the chosen selectors from the selection control into the definer
+ * layout. The allowed_* counts are the array lengths used during the fit
+ * search; full DW selectors are placed first in dw_selector, immediately
+ * followed by the limited DW selectors.
+ */
+static void
+hws_definer_copy_sel_ctrl(struct mlx5hws_definer_sel_ctrl *ctrl,
+                         struct mlx5hws_definer *definer)
+{
+       memcpy(definer->byte_selector, ctrl->byte_selector, ctrl->allowed_bytes);
+       memcpy(definer->dw_selector, ctrl->full_dw_selector, ctrl->allowed_full_dw);
+       memcpy(definer->dw_selector + ctrl->allowed_full_dw,
+              ctrl->lim_dw_selector, ctrl->allowed_lim_dw);
+}
+
+/* Find a selector assignment that fits the requested header layout (hl).
+ *
+ * Two attempts are made, each via the recursive best-fit search:
+ *   1. a regular match definer (DW_SELECTORS_MATCH full DWs + bytes);
+ *   2. a jumbo definer - all-full-DW when the device supports it,
+ *      otherwise full + limited DW selectors.
+ *
+ * On success the winning selectors are copied into @definer and its type
+ * is set accordingly. Returns 0 on success, -E2BIG when the fields cannot
+ * fit any definer layout.
+ */
+static int
+hws_definer_find_best_match_fit(struct mlx5hws_context *ctx,
+                               struct mlx5hws_definer *definer,
+                               u8 *hl)
+{
+       struct mlx5hws_definer_sel_ctrl ctrl = {0};
+       bool found;
+
+       /* Try to create a match definer */
+       ctrl.allowed_full_dw = DW_SELECTORS_MATCH;
+       ctrl.allowed_lim_dw = 0;
+       ctrl.allowed_bytes = BYTE_SELECTORS;
+
+       found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
+       if (found) {
+               hws_definer_copy_sel_ctrl(&ctrl, definer);
+               definer->type = MLX5HWS_DEFINER_TYPE_MATCH;
+               return 0;
+       }
+
+       /* Try to create a full/limited jumbo definer */
+       ctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS :
+                                                                 DW_SELECTORS_MATCH;
+       ctrl.allowed_lim_dw = ctx->caps->full_dw_jumbo_support ? 0 :
+                                                                DW_SELECTORS_LIMITED;
+       ctrl.allowed_bytes = BYTE_SELECTORS;
+
+       found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
+       if (found) {
+               hws_definer_copy_sel_ctrl(&ctrl, definer);
+               definer->type = MLX5HWS_DEFINER_TYPE_JUMBO;
+               return 0;
+       }
+
+       return -E2BIG;
+}
+
+/* Build the definer tag *mask* from the match parameters: for each field
+ * copy entry, prefer the dedicated tag_mask_set() callback when one is
+ * provided, otherwise fall back to the regular tag_set().
+ */
+static void
+hws_definer_create_tag_mask(u32 *match_param,
+                           struct mlx5hws_definer_fc *fc,
+                           u32 fc_sz,
+                           u8 *tag)
+{
+       u32 i;
+
+       for (i = 0; i < fc_sz; i++) {
+               if (fc->tag_mask_set)
+                       fc->tag_mask_set(fc, match_param, tag);
+               else
+                       fc->tag_set(fc, match_param, tag);
+               fc++;
+       }
+}
+
+/* Build the match tag for a rule: apply each field-copy entry's tag_set()
+ * callback on the given match parameters, writing the result into @tag.
+ */
+void mlx5hws_definer_create_tag(u32 *match_param,
+                               struct mlx5hws_definer_fc *fc,
+                               u32 fc_sz,
+                               u8 *tag)
+{
+       u32 idx;
+
+       for (idx = 0; idx < fc_sz; idx++)
+               fc[idx].tag_set(&fc[idx], match_param, tag);
+}
+
+/* Return the FW object ID backing this definer. */
+int mlx5hws_definer_get_id(struct mlx5hws_definer *definer)
+{
+       return definer->obj_id;
+}
+
+/* Compare two definers by their byte selectors, DW selectors and jumbo
+ * mask. Returns 0 when they are identical, 1 otherwise (memcmp-style,
+ * but without ordering semantics).
+ */
+int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
+                           struct mlx5hws_definer *definer_b)
+{
+       int i;
+
+       /* Future: Optimize by comparing selectors with valid mask only */
+       for (i = 0; i < BYTE_SELECTORS; i++)
+               if (definer_a->byte_selector[i] != definer_b->byte_selector[i])
+                       return 1;
+
+       for (i = 0; i < DW_SELECTORS; i++)
+               if (definer_a->dw_selector[i] != definer_b->dw_selector[i])
+                       return 1;
+
+       for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++)
+               if (definer_a->mask.jumbo[i] != definer_b->mask.jumbo[i])
+                       return 1;
+
+       return 0;
+}
+
+/* Calculate the definer layout for a match template.
+ *
+ * Converts the template's match parameters into the header layout (hl)
+ * union - this also allocates mt->fc - then searches for the best-fitting
+ * selector assignment, filling @match_definer.
+ *
+ * Returns 0 on success. On failure after mt->fc was allocated, mt->fc is
+ * freed here; the temporary hl buffer is always freed. -E2BIG (fields do
+ * not fit any definer) is logged at debug level only, since callers may
+ * fall back to a complex matcher.
+ */
+int
+mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
+                           struct mlx5hws_match_template *mt,
+                           struct mlx5hws_definer *match_definer)
+{
+       u8 *match_hl;
+       int ret;
+
+       /* Union header-layout (hl) is used for creating a single definer
+        * field layout used with different bitmasks for hash and match.
+        */
+       match_hl = kzalloc(MLX5_ST_SZ_BYTES(definer_hl), GFP_KERNEL);
+       if (!match_hl)
+               return -ENOMEM;
+
+       /* Convert all mt items to header layout (hl)
+        * and allocate the match and range field copy array (fc & fcr).
+        */
+       ret = hws_definer_conv_match_params_to_hl(ctx, mt, match_hl);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to convert items to header layout\n");
+               goto free_match_hl;
+       }
+
+       /* Find the match definer layout for header layout match union */
+       ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl);
+       if (ret) {
+               if (ret == -E2BIG)
+                       mlx5hws_dbg(ctx,
+                                   "Failed to create match definer from header layout - E2BIG\n");
+               else
+                       mlx5hws_err(ctx,
+                                   "Failed to create match definer from header layout (%d)\n",
+                                   ret);
+               goto free_fc;
+       }
+
+       kfree(match_hl);
+       return 0;
+
+free_fc:
+       kfree(mt->fc);
+free_match_hl:
+       kfree(match_hl);
+       return ret;
+}
+
+/* Allocate and initialize an empty definer cache; on success *cache is
+ * set to the new cache. Returns 0 or -ENOMEM.
+ */
+int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache)
+{
+       struct mlx5hws_definer_cache *new_cache;
+
+       new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL);
+       if (!new_cache)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&new_cache->list_head);
+       *cache = new_cache;
+
+       return 0;
+}
+
+/* Free the definer cache container. Assumes the cache list is already
+ * empty - entries are removed in hws_definer_put_obj() as their
+ * refcounts drop.
+ */
+void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache)
+{
+       kfree(cache);
+}
+
+/* Get a FW definer object matching the given definer layout and mask.
+ *
+ * The definer cache is searched first; a hit bumps the refcount and
+ * moves the entry to the head of the list (LRU). On a miss a new definer
+ * object is created in FW and inserted into the cache with refcount 1.
+ *
+ * Returns the (non-negative) definer object ID on success, or a negative
+ * error code on failure.
+ */
+int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
+                           struct mlx5hws_definer *definer)
+{
+       struct mlx5hws_definer_cache *cache = ctx->definer_cache;
+       struct mlx5hws_cmd_definer_create_attr def_attr = {0};
+       struct mlx5hws_definer_cache_item *cached_definer;
+       u32 obj_id;
+       int ret;
+
+       /* Search definer cache for requested definer */
+       list_for_each_entry(cached_definer, &cache->list_head, list_node) {
+               if (mlx5hws_definer_compare(&cached_definer->definer, definer))
+                       continue;
+
+               /* Reuse definer and set LRU (move to be first in the list) */
+               list_move(&cached_definer->list_node, &cache->list_head);
+               cached_definer->refcount++;
+               return cached_definer->definer.obj_id;
+       }
+
+       /* Allocate and create definer based on the bitmask tag */
+       def_attr.match_mask = definer->mask.jumbo;
+       def_attr.dw_selector = definer->dw_selector;
+       def_attr.byte_selector = definer->byte_selector;
+
+       /* Propagate the FW/command error code instead of a bare -1 */
+       ret = mlx5hws_cmd_definer_create(ctx->mdev, &def_attr, &obj_id);
+       if (ret)
+               return ret;
+
+       cached_definer = kzalloc(sizeof(*cached_definer), GFP_KERNEL);
+       if (!cached_definer) {
+               ret = -ENOMEM;
+               goto free_definer_obj;
+       }
+
+       memcpy(&cached_definer->definer, definer, sizeof(*definer));
+       cached_definer->definer.obj_id = obj_id;
+       cached_definer->refcount = 1;
+       list_add(&cached_definer->list_node, &cache->list_head);
+
+       return obj_id;
+
+free_definer_obj:
+       mlx5hws_cmd_definer_destroy(ctx->mdev, obj_id);
+       return ret;
+}
+
+/* Release one reference on a cached definer object. When the last
+ * reference is dropped the entry is unlinked, the FW object destroyed
+ * and the cache item freed.
+ */
+static void
+hws_definer_put_obj(struct mlx5hws_context *ctx, u32 obj_id)
+{
+       struct mlx5hws_definer_cache_item *cached_definer;
+
+       list_for_each_entry(cached_definer, &ctx->definer_cache->list_head, list_node) {
+               if (cached_definer->definer.obj_id != obj_id)
+                       continue;
+
+               /* Object found */
+               if (--cached_definer->refcount)
+                       return;
+
+               list_del_init(&cached_definer->list_node);
+               mlx5hws_cmd_definer_destroy(ctx->mdev, cached_definer->definer.obj_id);
+               kfree(cached_definer);
+               return;
+       }
+
+       /* Programming error, object must be part of cache.
+        * Use the context-aware logger like the rest of this file,
+        * instead of a raw pr_warn().
+        */
+       mlx5hws_err(ctx, "Failed putting definer object %u\n", obj_id);
+}
+
+/* Allocate a definer instance from a precomputed layout.
+ *
+ * Duplicates @layout, optionally binds the field-copy array to the
+ * layout's selectors (@bind_fc), builds the tag mask from @match_param,
+ * and obtains a FW object (cached or newly created).
+ *
+ * Returns the new definer, or NULL on any failure (errors from
+ * mlx5hws_definer_get_obj() are not propagated to the caller).
+ */
+static struct mlx5hws_definer *
+hws_definer_alloc(struct mlx5hws_context *ctx,
+                 struct mlx5hws_definer_fc *fc,
+                 int fc_sz,
+                 u32 *match_param,
+                 struct mlx5hws_definer *layout,
+                 bool bind_fc)
+{
+       struct mlx5hws_definer *definer;
+       int ret;
+
+       definer = kmemdup(layout, sizeof(*definer), GFP_KERNEL);
+       if (!definer)
+               return NULL;
+
+       /* Align field copy array based on given layout */
+       if (bind_fc) {
+               ret = hws_definer_fc_bind(definer, fc, fc_sz);
+               if (ret) {
+                       mlx5hws_err(ctx, "Failed to bind field copy to definer\n");
+                       goto free_definer;
+               }
+       }
+
+       /* Create the tag mask used for definer creation */
+       hws_definer_create_tag_mask(match_param, fc, fc_sz, definer->mask.jumbo);
+
+       ret = mlx5hws_definer_get_obj(ctx, definer);
+       if (ret < 0)
+               goto free_definer;
+
+       definer->obj_id = ret;
+       return definer;
+
+free_definer:
+       kfree(definer);
+       return NULL;
+}
+
+/* Release a definer: drop its FW object reference (destroying the object
+ * when this was the last user) and free the definer itself.
+ */
+void mlx5hws_definer_free(struct mlx5hws_context *ctx,
+                         struct mlx5hws_definer *definer)
+{
+       hws_definer_put_obj(ctx, definer->obj_id);
+       kfree(definer);
+}
+
+/* Create the mandatory match definer for a match template from the
+ * precomputed layout. Returns 0 on success; -EINVAL on failure (the real
+ * cause is collapsed because hws_definer_alloc() only returns NULL).
+ */
+static int
+hws_definer_mt_match_init(struct mlx5hws_context *ctx,
+                         struct mlx5hws_match_template *mt,
+                         struct mlx5hws_definer *match_layout)
+{
+       /* Create mandatory match definer */
+       mt->definer = hws_definer_alloc(ctx,
+                                       mt->fc,
+                                       mt->fc_sz,
+                                       mt->match_param,
+                                       match_layout,
+                                       true);
+       if (!mt->definer) {
+               mlx5hws_err(ctx, "Failed to create match definer\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Destroy the match template's match definer (counterpart of
+ * hws_definer_mt_match_init()).
+ */
+static void
+hws_definer_mt_match_uninit(struct mlx5hws_context *ctx,
+                           struct mlx5hws_match_template *mt)
+{
+       mlx5hws_definer_free(ctx, mt->definer);
+}
+
+/* Initialize the definers of a match template: compute the selector
+ * layout (this allocates mt->fc) and create the match definer from it.
+ * On definer creation failure mt->fc is freed here. Returns 0 or a
+ * negative error code.
+ */
+int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
+                           struct mlx5hws_match_template *mt)
+{
+       struct mlx5hws_definer match_layout = {0};
+       int ret;
+
+       ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
+               return ret;
+       }
+
+       /* Calculate definers needed for exact match */
+       ret = hws_definer_mt_match_init(ctx, mt, &match_layout);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to init match definers\n");
+               goto free_fc;
+       }
+
+       return 0;
+
+free_fc:
+       kfree(mt->fc);
+       return ret;
+}
+
+/* Tear down a match template's definers and free its field-copy array
+ * (counterpart of mlx5hws_definer_mt_init()).
+ */
+void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx,
+                              struct mlx5hws_match_template *mt)
+{
+       hws_definer_mt_match_uninit(ctx, mt);
+       kfree(mt->fc);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
new file mode 100644 (file)
index 0000000..9432d50
--- /dev/null
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_DEFINER_H_
+#define HWS_DEFINER_H_
+
+/* Max available selectors */
+#define DW_SELECTORS 9
+#define BYTE_SELECTORS 8
+
+/* Selectors based on match TAG */
+#define DW_SELECTORS_MATCH 6
+#define DW_SELECTORS_LIMITED 3
+
+/* Selectors based on range TAG */
+#define DW_SELECTORS_RANGE 2
+#define BYTE_SELECTORS_RANGE 8
+
+#define HWS_NUM_OF_FLEX_PARSERS 8
+
+/* Names of all header-layout fields a definer can match on. The _O/_I
+ * suffixes denote outer/inner header instances (mirroring the outer/inner
+ * sections of the header-layout structs below).
+ */
+enum mlx5hws_definer_fname {
+       MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O,
+       MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I,
+       MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O,
+       MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I,
+       MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O,
+       MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I,
+       MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O,
+       MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I,
+       MLX5HWS_DEFINER_FNAME_ETH_TYPE_O,
+       MLX5HWS_DEFINER_FNAME_ETH_TYPE_I,
+       MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O,
+       MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I,
+       MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O,
+       MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I,
+       MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_O,
+       MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_I,
+       MLX5HWS_DEFINER_FNAME_VLAN_CFI_O,
+       MLX5HWS_DEFINER_FNAME_VLAN_CFI_I,
+       MLX5HWS_DEFINER_FNAME_VLAN_ID_O,
+       MLX5HWS_DEFINER_FNAME_VLAN_ID_I,
+       MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O,
+       MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I,
+       MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_O,
+       MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_I,
+       MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_O,
+       MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_I,
+       MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_O,
+       MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_I,
+       MLX5HWS_DEFINER_FNAME_IPV4_IHL_O,
+       MLX5HWS_DEFINER_FNAME_IPV4_IHL_I,
+       MLX5HWS_DEFINER_FNAME_IP_DSCP_O,
+       MLX5HWS_DEFINER_FNAME_IP_DSCP_I,
+       MLX5HWS_DEFINER_FNAME_IP_ECN_O,
+       MLX5HWS_DEFINER_FNAME_IP_ECN_I,
+       MLX5HWS_DEFINER_FNAME_IP_TTL_O,
+       MLX5HWS_DEFINER_FNAME_IP_TTL_I,
+       MLX5HWS_DEFINER_FNAME_IPV4_DST_O,
+       MLX5HWS_DEFINER_FNAME_IPV4_DST_I,
+       MLX5HWS_DEFINER_FNAME_IPV4_SRC_O,
+       MLX5HWS_DEFINER_FNAME_IPV4_SRC_I,
+       MLX5HWS_DEFINER_FNAME_IP_VERSION_O,
+       MLX5HWS_DEFINER_FNAME_IP_VERSION_I,
+       MLX5HWS_DEFINER_FNAME_IP_FRAG_O,
+       MLX5HWS_DEFINER_FNAME_IP_FRAG_I,
+       MLX5HWS_DEFINER_FNAME_IP_LEN_O,
+       MLX5HWS_DEFINER_FNAME_IP_LEN_I,
+       MLX5HWS_DEFINER_FNAME_IP_TOS_O,
+       MLX5HWS_DEFINER_FNAME_IP_TOS_I,
+       MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_O,
+       MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_I,
+       MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O,
+       MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O,
+       MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O,
+       MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O,
+       MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I,
+       MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I,
+       MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I,
+       MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I,
+       MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O,
+       MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O,
+       MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O,
+       MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O,
+       MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I,
+       MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I,
+       MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I,
+       MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I,
+       MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_O,
+       MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_I,
+       MLX5HWS_DEFINER_FNAME_L4_SPORT_O,
+       MLX5HWS_DEFINER_FNAME_L4_SPORT_I,
+       MLX5HWS_DEFINER_FNAME_L4_DPORT_O,
+       MLX5HWS_DEFINER_FNAME_L4_DPORT_I,
+       MLX5HWS_DEFINER_FNAME_TCP_FLAGS_I,
+       MLX5HWS_DEFINER_FNAME_TCP_FLAGS_O,
+       MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM,
+       MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM,
+       MLX5HWS_DEFINER_FNAME_GTP_TEID,
+       MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE,
+       MLX5HWS_DEFINER_FNAME_GTP_EXT_FLAG,
+       MLX5HWS_DEFINER_FNAME_GTP_NEXT_EXT_HDR,
+       MLX5HWS_DEFINER_FNAME_GTP_EXT_HDR_PDU,
+       MLX5HWS_DEFINER_FNAME_GTP_EXT_HDR_QFI,
+       MLX5HWS_DEFINER_FNAME_GTPU_DW0,
+       MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0,
+       MLX5HWS_DEFINER_FNAME_GTPU_DW2,
+       MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0,
+       MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1,
+       MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2,
+       MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3,
+       MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4,
+       MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5,
+       MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6,
+       MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7,
+       MLX5HWS_DEFINER_FNAME_VPORT_REG_C_0,
+       MLX5HWS_DEFINER_FNAME_VXLAN_FLAGS,
+       MLX5HWS_DEFINER_FNAME_VXLAN_VNI,
+       MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS,
+       MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD0,
+       MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO,
+       MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI,
+       MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD1,
+       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN,
+       MLX5HWS_DEFINER_FNAME_GENEVE_OAM,
+       MLX5HWS_DEFINER_FNAME_GENEVE_PROTO,
+       MLX5HWS_DEFINER_FNAME_GENEVE_VNI,
+       MLX5HWS_DEFINER_FNAME_SOURCE_QP,
+       MLX5HWS_DEFINER_FNAME_SOURCE_GVMI,
+       MLX5HWS_DEFINER_FNAME_REG_0,
+       MLX5HWS_DEFINER_FNAME_REG_1,
+       MLX5HWS_DEFINER_FNAME_REG_2,
+       MLX5HWS_DEFINER_FNAME_REG_3,
+       MLX5HWS_DEFINER_FNAME_REG_4,
+       MLX5HWS_DEFINER_FNAME_REG_5,
+       MLX5HWS_DEFINER_FNAME_REG_6,
+       MLX5HWS_DEFINER_FNAME_REG_7,
+       MLX5HWS_DEFINER_FNAME_REG_8,
+       MLX5HWS_DEFINER_FNAME_REG_9,
+       MLX5HWS_DEFINER_FNAME_REG_10,
+       MLX5HWS_DEFINER_FNAME_REG_11,
+       MLX5HWS_DEFINER_FNAME_REG_A,
+       MLX5HWS_DEFINER_FNAME_REG_B,
+       MLX5HWS_DEFINER_FNAME_GRE_KEY_PRESENT,
+       MLX5HWS_DEFINER_FNAME_GRE_C,
+       MLX5HWS_DEFINER_FNAME_GRE_K,
+       MLX5HWS_DEFINER_FNAME_GRE_S,
+       MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL,
+       MLX5HWS_DEFINER_FNAME_GRE_OPT_KEY,
+       MLX5HWS_DEFINER_FNAME_GRE_OPT_SEQ,
+       MLX5HWS_DEFINER_FNAME_GRE_OPT_CHECKSUM,
+       MLX5HWS_DEFINER_FNAME_INTEGRITY_O,
+       MLX5HWS_DEFINER_FNAME_INTEGRITY_I,
+       MLX5HWS_DEFINER_FNAME_ICMP_DW1,
+       MLX5HWS_DEFINER_FNAME_ICMP_DW2,
+       MLX5HWS_DEFINER_FNAME_ICMP_DW3,
+       MLX5HWS_DEFINER_FNAME_IPSEC_SPI,
+       MLX5HWS_DEFINER_FNAME_IPSEC_SEQUENCE_NUMBER,
+       MLX5HWS_DEFINER_FNAME_IPSEC_SYNDROME,
+       MLX5HWS_DEFINER_FNAME_MPLS0_O,
+       MLX5HWS_DEFINER_FNAME_MPLS1_O,
+       MLX5HWS_DEFINER_FNAME_MPLS2_O,
+       MLX5HWS_DEFINER_FNAME_MPLS3_O,
+       MLX5HWS_DEFINER_FNAME_MPLS4_O,
+       MLX5HWS_DEFINER_FNAME_MPLS0_I,
+       MLX5HWS_DEFINER_FNAME_MPLS1_I,
+       MLX5HWS_DEFINER_FNAME_MPLS2_I,
+       MLX5HWS_DEFINER_FNAME_MPLS3_I,
+       MLX5HWS_DEFINER_FNAME_MPLS4_I,
+       MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK,
+       MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK,
+       MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK,
+       MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK,
+       MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK,
+       MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK,
+       MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK,
+       MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK,
+       MLX5HWS_DEFINER_FNAME_OKS2_MPLS0_O,
+       MLX5HWS_DEFINER_FNAME_OKS2_MPLS1_O,
+       MLX5HWS_DEFINER_FNAME_OKS2_MPLS2_O,
+       MLX5HWS_DEFINER_FNAME_OKS2_MPLS3_O,
+       MLX5HWS_DEFINER_FNAME_OKS2_MPLS4_O,
+       MLX5HWS_DEFINER_FNAME_OKS2_MPLS0_I,
+       MLX5HWS_DEFINER_FNAME_OKS2_MPLS1_I,
+       MLX5HWS_DEFINER_FNAME_OKS2_MPLS2_I,
+       MLX5HWS_DEFINER_FNAME_OKS2_MPLS3_I,
+       MLX5HWS_DEFINER_FNAME_OKS2_MPLS4_I,
+       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_0,
+       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_1,
+       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_2,
+       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_3,
+       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_4,
+       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_5,
+       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_6,
+       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_7,
+       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_0,
+       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_1,
+       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_2,
+       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_3,
+       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_4,
+       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_5,
+       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_6,
+       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_7,
+       MLX5HWS_DEFINER_FNAME_IB_L4_OPCODE,
+       MLX5HWS_DEFINER_FNAME_IB_L4_QPN,
+       MLX5HWS_DEFINER_FNAME_IB_L4_A,
+       MLX5HWS_DEFINER_FNAME_RANDOM_NUM,
+       MLX5HWS_DEFINER_FNAME_PTYPE_L2_O,
+       MLX5HWS_DEFINER_FNAME_PTYPE_L2_I,
+       MLX5HWS_DEFINER_FNAME_PTYPE_L3_O,
+       MLX5HWS_DEFINER_FNAME_PTYPE_L3_I,
+       MLX5HWS_DEFINER_FNAME_PTYPE_L4_O,
+       MLX5HWS_DEFINER_FNAME_PTYPE_L4_I,
+       MLX5HWS_DEFINER_FNAME_PTYPE_L4_EXT_O,
+       MLX5HWS_DEFINER_FNAME_PTYPE_L4_EXT_I,
+       MLX5HWS_DEFINER_FNAME_PTYPE_FRAG_O,
+       MLX5HWS_DEFINER_FNAME_PTYPE_FRAG_I,
+       MLX5HWS_DEFINER_FNAME_TNL_HDR_0,
+       MLX5HWS_DEFINER_FNAME_TNL_HDR_1,
+       MLX5HWS_DEFINER_FNAME_TNL_HDR_2,
+       MLX5HWS_DEFINER_FNAME_TNL_HDR_3,
+       MLX5HWS_DEFINER_FNAME_MAX,
+};
+
+/* Bitmask describing which match-parameter criteria sections
+ * (outer/inner headers and the misc parameter groups) a rule uses.
+ */
+enum mlx5hws_definer_match_criteria {
+       MLX5HWS_DEFINER_MATCH_CRITERIA_EMPTY = 0,
+       MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER = 1 << 0,
+       MLX5HWS_DEFINER_MATCH_CRITERIA_MISC = 1 << 1,
+       MLX5HWS_DEFINER_MATCH_CRITERIA_INNER = 1 << 2,
+       MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2 = 1 << 3,
+       MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3 = 1 << 4,
+       MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4 = 1 << 5,
+       MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5 = 1 << 6,
+       MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6 = 1 << 7,
+};
+
+/* Definer flavor as chosen by the best-fit search: MATCH when the fields
+ * fit the regular match-sized selector set, JUMBO when the larger
+ * (full/limited DW) selector set is required.
+ */
+enum mlx5hws_definer_type {
+       MLX5HWS_DEFINER_TYPE_MATCH,
+       MLX5HWS_DEFINER_TYPE_JUMBO,
+};
+
+/* Flags recording which tunnel/L4 protocol matches a template requested
+ * (used while converting match params to the header layout).
+ */
+enum mlx5hws_definer_match_flag {
+       MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE = 1 << 0,
+       MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE = 1 << 1,
+       MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU = 1 << 2,
+       MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE = 1 << 3,
+       MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN = 1 << 4,
+       MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1 = 1 << 5,
+
+       MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY = 1 << 6,
+       MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2 = 1 << 7,
+
+       MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE = 1 << 8,
+       MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP = 1 << 9,
+
+       MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 = 1 << 10,
+       MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 = 1 << 11,
+       MLX5HWS_DEFINER_MATCH_FLAG_TCP_O = 1 << 12,
+       MLX5HWS_DEFINER_MATCH_FLAG_TCP_I = 1 << 13,
+};
+
+/* Definer field copy (fc): describes how one match field is copied from
+ * the match-parameter layout (source, s_* members) into the definer tag
+ * (destination), together with the callbacks that perform the copy.
+ */
+struct mlx5hws_definer_fc {
+       struct mlx5hws_context *ctx;
+       /* Source */
+       u32 s_byte_off;
+       int s_bit_off;
+       u32 s_bit_mask;
+       /* Destination */
+       u32 byte_off;
+       int bit_off;
+       u32 bit_mask;
+       enum mlx5hws_definer_fname fname;
+       /* Write the field's value from match_param into the tag.
+        * Note: prototype parameter renamed from the misspelled
+        * "mach_param" - cosmetic, no ABI change.
+        */
+       void (*tag_set)(struct mlx5hws_definer_fc *fc,
+                       void *match_param,
+                       u8 *tag);
+       /* Write the field's mask into the tag (used when building the
+        * definer tag mask; optional - may be NULL).
+        */
+       void (*tag_mask_set)(struct mlx5hws_definer_fc *fc,
+                            void *match_param,
+                            u8 *tag);
+};
+
+/* Header-layout (mlx5_ifc-style bit layout): Ethernet L2 section. */
+struct mlx5_ifc_definer_hl_eth_l2_bits {
+       u8 dmac_47_16[0x20];
+       u8 dmac_15_0[0x10];
+       u8 l3_ethertype[0x10];
+       u8 reserved_at_40[0x1];
+       u8 sx_sniffer[0x1];
+       u8 functional_lb[0x1];
+       u8 ip_fragmented[0x1];
+       u8 qp_type[0x2];
+       u8 encap_type[0x2];
+       u8 port_number[0x2];
+       u8 l3_type[0x2];
+       u8 l4_type_bwc[0x2];
+       u8 first_vlan_qualifier[0x2];
+       u8 first_priority[0x3];
+       u8 first_cfi[0x1];
+       u8 first_vlan_id[0xc];
+       u8 l4_type[0x4];
+       u8 reserved_at_64[0x2];
+       u8 ipsec_layer[0x2];
+       u8 l2_type[0x2];
+       u8 force_lb[0x1];
+       u8 l2_ok[0x1];
+       u8 l3_ok[0x1];
+       u8 l4_ok[0x1];
+       u8 second_vlan_qualifier[0x2];
+       u8 second_priority[0x3];
+       u8 second_cfi[0x1];
+       u8 second_vlan_id[0xc];
+};
+
+/* Header-layout: Ethernet L2 source-MAC section. */
+struct mlx5_ifc_definer_hl_eth_l2_src_bits {
+       u8 smac_47_16[0x20];
+       u8 smac_15_0[0x10];
+       u8 loopback_syndrome[0x8];
+       u8 l3_type[0x2];
+       u8 l4_type_bwc[0x2];
+       u8 first_vlan_qualifier[0x2];
+       u8 ip_fragmented[0x1];
+       u8 functional_lb[0x1];
+};
+
+/* Header-layout: InfiniBand L2 (LRH) section. */
+struct mlx5_ifc_definer_hl_ib_l2_bits {
+       u8 sx_sniffer[0x1];
+       u8 force_lb[0x1];
+       u8 functional_lb[0x1];
+       u8 reserved_at_3[0x3];
+       u8 port_number[0x2];
+       u8 sl[0x4];
+       u8 qp_type[0x2];
+       u8 lnh[0x2];
+       u8 dlid[0x10];
+       u8 vl[0x4];
+       u8 lrh_packet_length[0xc];
+       u8 slid[0x10];
+};
+
+/* Header-layout: L3 section, shared between IPv4 and IPv6 (unions cover
+ * the fields whose interpretation differs by IP version).
+ */
+struct mlx5_ifc_definer_hl_eth_l3_bits {
+       u8 ip_version[0x4];
+       u8 ihl[0x4];
+       union {
+               u8 tos[0x8];
+               struct {
+                       u8 dscp[0x6];
+                       u8 ecn[0x2];
+               };
+       };
+       u8 time_to_live_hop_limit[0x8];
+       u8 protocol_next_header[0x8];
+       u8 identification[0x10];
+       union {
+               u8 ipv4_frag[0x10];
+               struct {
+                       u8 flags[0x3];
+                       u8 fragment_offset[0xd];
+               };
+       };
+       u8 ipv4_total_length[0x10];
+       u8 checksum[0x10];
+       u8 reserved_at_60[0xc];
+       u8 flow_label[0x14];
+       u8 packet_length[0x10];
+       u8 ipv6_payload_length[0x10];
+};
+
+/* Header-layout: L4 (TCP/UDP) section; the union exposes the TCP flag
+ * byte both as a whole and as individual flag bits.
+ */
+struct mlx5_ifc_definer_hl_eth_l4_bits {
+       u8 source_port[0x10];
+       u8 destination_port[0x10];
+       u8 data_offset[0x4];
+       u8 l4_ok[0x1];
+       u8 l3_ok[0x1];
+       u8 ip_fragmented[0x1];
+       u8 tcp_ns[0x1];
+       union {
+               u8 tcp_flags[0x8];
+               struct {
+                       u8 tcp_cwr[0x1];
+                       u8 tcp_ece[0x1];
+                       u8 tcp_urg[0x1];
+                       u8 tcp_ack[0x1];
+                       u8 tcp_psh[0x1];
+                       u8 tcp_rst[0x1];
+                       u8 tcp_syn[0x1];
+                       u8 tcp_fin[0x1];
+               };
+       };
+       u8 first_fragment[0x1];
+       u8 reserved_at_31[0xf];
+};
+
+/* Header-layout: source QP / source GVMI (vport) section. */
+struct mlx5_ifc_definer_hl_src_qp_gvmi_bits {
+       u8 loopback_syndrome[0x8];
+       u8 l3_type[0x2];
+       u8 l4_type_bwc[0x2];
+       u8 first_vlan_qualifier[0x2];
+       u8 reserved_at_e[0x1];
+       u8 functional_lb[0x1];
+       u8 source_gvmi[0x10];
+       u8 force_lb[0x1];
+       u8 ip_fragmented[0x1];
+       u8 source_is_requestor[0x1];
+       u8 reserved_at_23[0x5];
+       u8 source_qp[0x18];
+};
+
+/* Header-layout: InfiniBand L4 (BTH/DETH) section. */
+struct mlx5_ifc_definer_hl_ib_l4_bits {
+       u8 opcode[0x8];
+       u8 qp[0x18];
+       u8 se[0x1];
+       u8 migreq[0x1];
+       u8 ackreq[0x1];
+       u8 fecn[0x1];
+       u8 becn[0x1];
+       u8 bth[0x1];
+       u8 deth[0x1];
+       u8 dcceth[0x1];
+       u8 reserved_at_28[0x2];
+       u8 pad_count[0x2];
+       u8 tver[0x4];
+       u8 p_key[0x10];
+       u8 reserved_at_40[0x8];
+       u8 deth_source_qp[0x18];
+};
+
+/* Bit positions within the OKS1 integrity word (see
+ * struct mlx5_ifc_definer_hl_oks1_bits below).
+ */
+enum mlx5hws_integrity_ok1_bits {
+       MLX5HWS_DEFINER_OKS1_FIRST_L4_OK = 24,
+       MLX5HWS_DEFINER_OKS1_FIRST_L3_OK = 25,
+       MLX5HWS_DEFINER_OKS1_SECOND_L4_OK = 26,
+       MLX5HWS_DEFINER_OKS1_SECOND_L3_OK = 27,
+       MLX5HWS_DEFINER_OKS1_FIRST_L4_CSUM_OK = 28,
+       MLX5HWS_DEFINER_OKS1_FIRST_IPV4_CSUM_OK = 29,
+       MLX5HWS_DEFINER_OKS1_SECOND_L4_CSUM_OK = 30,
+       MLX5HWS_DEFINER_OKS1_SECOND_IPV4_CSUM_OK = 31,
+};
+
+/* Header-layout: OKS1 parsing/integrity status word; the union exposes
+ * it both as one 32-bit field and as individual status bits.
+ */
+struct mlx5_ifc_definer_hl_oks1_bits {
+       union {
+               u8 oks1_bits[0x20];
+               struct {
+                       u8 second_ipv4_checksum_ok[0x1];
+                       u8 second_l4_checksum_ok[0x1];
+                       u8 first_ipv4_checksum_ok[0x1];
+                       u8 first_l4_checksum_ok[0x1];
+                       u8 second_l3_ok[0x1];
+                       u8 second_l4_ok[0x1];
+                       u8 first_l3_ok[0x1];
+                       u8 first_l4_ok[0x1];
+                       u8 flex_parser7_steering_ok[0x1];
+                       u8 flex_parser6_steering_ok[0x1];
+                       u8 flex_parser5_steering_ok[0x1];
+                       u8 flex_parser4_steering_ok[0x1];
+                       u8 flex_parser3_steering_ok[0x1];
+                       u8 flex_parser2_steering_ok[0x1];
+                       u8 flex_parser1_steering_ok[0x1];
+                       u8 flex_parser0_steering_ok[0x1];
+                       u8 second_ipv6_extension_header_vld[0x1];
+                       u8 first_ipv6_extension_header_vld[0x1];
+                       u8 l3_tunneling_ok[0x1];
+                       u8 l2_tunneling_ok[0x1];
+                       u8 second_tcp_ok[0x1];
+                       u8 second_udp_ok[0x1];
+                       u8 second_ipv4_ok[0x1];
+                       u8 second_ipv6_ok[0x1];
+                       u8 second_l2_ok[0x1];
+                       u8 vxlan_ok[0x1];
+                       u8 gre_ok[0x1];
+                       u8 first_tcp_ok[0x1];
+                       u8 first_udp_ok[0x1];
+                       u8 first_ipv4_ok[0x1];
+                       u8 first_ipv6_ok[0x1];
+                       u8 first_l2_ok[0x1];
+               };
+       };
+};
+
+/* Header-layout: OKS2 status word (MPLS parsing qualifiers/S-bits). */
+struct mlx5_ifc_definer_hl_oks2_bits {
+       u8 reserved_at_0[0xa];
+       u8 second_mpls_ok[0x1];
+       u8 second_mpls4_s_bit[0x1];
+       u8 second_mpls4_qualifier[0x1];
+       u8 second_mpls3_s_bit[0x1];
+       u8 second_mpls3_qualifier[0x1];
+       u8 second_mpls2_s_bit[0x1];
+       u8 second_mpls2_qualifier[0x1];
+       u8 second_mpls1_s_bit[0x1];
+       u8 second_mpls1_qualifier[0x1];
+       u8 second_mpls0_s_bit[0x1];
+       u8 second_mpls0_qualifier[0x1];
+       u8 first_mpls_ok[0x1];
+       u8 first_mpls4_s_bit[0x1];
+       u8 first_mpls4_qualifier[0x1];
+       u8 first_mpls3_s_bit[0x1];
+       u8 first_mpls3_qualifier[0x1];
+       u8 first_mpls2_s_bit[0x1];
+       u8 first_mpls2_qualifier[0x1];
+       u8 first_mpls1_s_bit[0x1];
+       u8 first_mpls1_qualifier[0x1];
+       u8 first_mpls0_s_bit[0x1];
+       u8 first_mpls0_qualifier[0x1];
+};
+
+/* Header-layout: VOQ (virtual output queue) section. */
+struct mlx5_ifc_definer_hl_voq_bits {
+       u8 reserved_at_0[0x18];
+       u8 ecn_ok[0x1];
+       u8 congestion[0x1];
+       u8 profile[0x2];
+       u8 internal_prio[0x4];
+};
+
+/* Header-layout: IPv4 source/destination address pair. */
+struct mlx5_ifc_definer_hl_ipv4_src_dst_bits {
+       u8 source_address[0x20];
+       u8 destination_address[0x20];
+};
+
+/* Header-layout: 16-bit random number field. */
+struct mlx5_ifc_definer_hl_random_number_bits {
+       u8 random_number[0x10];
+       u8 reserved[0x10];
+};
+
+/* Header-layout: one IPv6 address, split into four 32-bit chunks. */
+struct mlx5_ifc_definer_hl_ipv6_addr_bits {
+       u8 ipv6_address_127_96[0x20];
+       u8 ipv6_address_95_64[0x20];
+       u8 ipv6_address_63_32[0x20];
+       u8 ipv6_address_31_0[0x20];
+};
+
+/* Header-layout: three DWs shared (via union) between ICMP header words
+ * and TCP sequence/ack/window-urgent words.
+ */
+struct mlx5_ifc_definer_tcp_icmp_header_bits {
+       union {
+               struct {
+                       u8 icmp_dw1[0x20];
+                       u8 icmp_dw2[0x20];
+                       u8 icmp_dw3[0x20];
+               };
+               struct {
+                       u8 tcp_seq[0x20];
+                       u8 tcp_ack[0x20];
+                       u8 tcp_win_urg[0x20];
+               };
+       };
+};
+
+/* Header-layout: generic tunnel header, four raw 32-bit words. */
+struct mlx5_ifc_definer_hl_tunnel_header_bits {
+       u8 tunnel_header_0[0x20];
+       u8 tunnel_header_1[0x20];
+       u8 tunnel_header_2[0x20];
+       u8 tunnel_header_3[0x20];
+};
+
+/* Header-layout: IPsec section (SPI, sequence number, syndrome). */
+struct mlx5_ifc_definer_hl_ipsec_bits {
+       u8 spi[0x20];
+       u8 sequence_number[0x20];
+       u8 reserved[0x10];
+       u8 ipsec_syndrome[0x8];
+       u8 next_header[0x8];
+};
+
+/* Definer header-layout: metadata fields.
+ * NOTE(review): "acomulated_hash" is a typo for "accumulated_hash", but
+ * the name is kept as-is since it is referenced via MLX5_BYTE_OFF/etc.
+ * lookups elsewhere in the driver.
+ */
+struct mlx5_ifc_definer_hl_metadata_bits {
+       u8 metadata_to_cqe[0x20];
+       u8 general_purpose[0x20];
+       u8 acomulated_hash[0x20];
+};
+
+/* Definer header-layout: the eight flex parser dwords, laid out from
+ * parser 7 down to parser 0.
+ */
+struct mlx5_ifc_definer_hl_flex_parser_bits {
+       u8 flex_parser_7[0x20];
+       u8 flex_parser_6[0x20];
+       u8 flex_parser_5[0x20];
+       u8 flex_parser_4[0x20];
+       u8 flex_parser_3[0x20];
+       u8 flex_parser_2[0x20];
+       u8 flex_parser_1[0x20];
+       u8 flex_parser_0[0x20];
+};
+
+/* Definer header-layout: steering registers C0-C11.
+ * NOTE(review): registers appear in pairs with the even/odd order
+ * swapped per pair (10,11,8,9,...) — this mirrors the HW layout, do not
+ * "sort" these fields.
+ */
+struct mlx5_ifc_definer_hl_registers_bits {
+       u8 register_c_10[0x20];
+       u8 register_c_11[0x20];
+       u8 register_c_8[0x20];
+       u8 register_c_9[0x20];
+       u8 register_c_6[0x20];
+       u8 register_c_7[0x20];
+       u8 register_c_4[0x20];
+       u8 register_c_5[0x20];
+       u8 register_c_2[0x20];
+       u8 register_c_3[0x20];
+       u8 register_c_0[0x20];
+       u8 register_c_1[0x20];
+};
+
+/* Definer header-layout: up to five MPLS labels (one dword each). */
+struct mlx5_ifc_definer_hl_mpls_bits {
+       u8 mpls0_label[0x20];
+       u8 mpls1_label[0x20];
+       u8 mpls2_label[0x20];
+       u8 mpls3_label[0x20];
+       u8 mpls4_label[0x20];
+};
+
+/* Full definer header-layout (HL): the flat view of all packet headers
+ * and metadata a definer can select match fields from. Field offsets in
+ * this struct correspond to HW header-layout offsets, so members must
+ * stay in exactly this order. "unsupported_*" spans are areas of the HW
+ * layout this driver does not currently expose for matching.
+ */
+struct mlx5_ifc_definer_hl_bits {
+       struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_outer;
+       struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_inner;
+       struct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_outer;
+       struct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_inner;
+       struct mlx5_ifc_definer_hl_ib_l2_bits ib_l2;
+       struct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_outer;
+       struct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_inner;
+       struct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_outer;
+       struct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_inner;
+       struct mlx5_ifc_definer_hl_src_qp_gvmi_bits source_qp_gvmi;
+       struct mlx5_ifc_definer_hl_ib_l4_bits ib_l4;
+       struct mlx5_ifc_definer_hl_oks1_bits oks1;
+       struct mlx5_ifc_definer_hl_oks2_bits oks2;
+       struct mlx5_ifc_definer_hl_voq_bits voq;
+       u8 reserved_at_480[0x380];
+       struct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_outer;
+       struct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_inner;
+       struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_outer;
+       struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_inner;
+       struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_outer;
+       struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_inner;
+       u8 unsupported_dest_ib_l3[0x80];
+       u8 unsupported_source_ib_l3[0x80];
+       u8 unsupported_udp_misc_outer[0x20];
+       u8 unsupported_udp_misc_inner[0x20];
+       struct mlx5_ifc_definer_tcp_icmp_header_bits tcp_icmp;
+       struct mlx5_ifc_definer_hl_tunnel_header_bits tunnel_header;
+       struct mlx5_ifc_definer_hl_mpls_bits mpls_outer;
+       struct mlx5_ifc_definer_hl_mpls_bits mpls_inner;
+       u8 unsupported_config_headers_outer[0x80];
+       u8 unsupported_config_headers_inner[0x80];
+       struct mlx5_ifc_definer_hl_random_number_bits random_number;
+       struct mlx5_ifc_definer_hl_ipsec_bits ipsec;
+       struct mlx5_ifc_definer_hl_metadata_bits metadata;
+       u8 unsupported_utc_timestamp[0x40];
+       u8 unsupported_free_running_timestamp[0x40];
+       struct mlx5_ifc_definer_hl_flex_parser_bits flex_parser;
+       struct mlx5_ifc_definer_hl_registers_bits registers;
+       /* Reserved in case the header layout grows on future HW */
+       u8 unsupported_reserved[0xd40];
+};
+
+enum mlx5hws_definer_gtp {
+       /* "E" flag in the GTP-U header: extension header present */
+       MLX5HWS_DEFINER_GTP_EXT_HDR_BIT = 0x04,
+};
+
+/* GTP-U header (3GPP TS 29.281). The three individual flag bits alias
+ * the 3-bit msg_flags field via the union.
+ */
+struct mlx5_ifc_header_gtp_bits {
+       u8 version[0x3];
+       u8 proto_type[0x1];
+       u8 reserved1[0x1];
+       union {
+               u8 msg_flags[0x3];
+               struct {
+                       u8 ext_hdr_flag[0x1];
+                       u8 seq_num_flag[0x1];
+                       u8 pdu_flag[0x1];
+               };
+       };
+       u8 msg_type[0x8];
+       u8 msg_len[0x8];
+       u8 teid[0x20];
+};
+
+/* Optional GTP-U fields present when any of S/PN/E flags is set. */
+struct mlx5_ifc_header_opt_gtp_bits {
+       u8 seq_num[0x10];
+       u8 pdu_num[0x8];
+       u8 next_ext_hdr_type[0x8];
+};
+
+/* GTP-U PDU Session Container extension header (PSC), carrying QFI. */
+struct mlx5_ifc_header_gtp_psc_bits {
+       u8 len[0x8];
+       u8 pdu_type[0x4];
+       u8 flags[0x4];
+       u8 qfi[0x8];
+       u8 reserved2[0x8];
+};
+
+/* First dword of the IPv6 header: version / traffic class / flow label.
+ * The traffic class byte aliases its DSCP/ECN split via the union.
+ */
+struct mlx5_ifc_header_ipv6_vtc_bits {
+       u8 version[0x4];
+       union {
+               u8 tos[0x8];
+               struct {
+                       u8 dscp[0x6];
+                       u8 ecn[0x2];
+               };
+       };
+       u8 flow_label[0x14];
+};
+
+/* IPv6 routing extension header; the second dword aliases the SRH
+ * (segment routing) last_entry/flag/tag fields via the union.
+ */
+struct mlx5_ifc_header_ipv6_routing_ext_bits {
+       u8 next_hdr[0x8];
+       u8 hdr_len[0x8];
+       u8 type[0x8];
+       u8 segments_left[0x8];
+       union {
+               u8 flags[0x20];
+               struct {
+                       u8 last_entry[0x8];
+                       u8 flag[0x8];
+                       u8 tag[0x10];
+               };
+       };
+};
+
+/* VXLAN header (RFC 7348): flags + 24-bit VNI. */
+struct mlx5_ifc_header_vxlan_bits {
+       u8 flags[0x8];
+       u8 reserved1[0x18];
+       u8 vni[0x18];
+       u8 reserved2[0x8];
+};
+
+/* VXLAN-GPE header: like VXLAN but with a next-protocol field. */
+struct mlx5_ifc_header_vxlan_gpe_bits {
+       u8 flags[0x8];
+       u8 rsvd0[0x10];
+       u8 protocol[0x8];
+       u8 vni[0x18];
+       u8 rsvd1[0x8];
+};
+
+/* GRE header (RFC 2784/2890). The first 16 bits alias the individual
+ * C/K/S presence bits and version via the union.
+ */
+struct mlx5_ifc_header_gre_bits {
+       union {
+               u8 c_rsvd0_ver[0x10];
+               struct {
+                       u8 gre_c_present[0x1];
+                       u8 reserved_at_1[0x1];
+                       u8 gre_k_present[0x1];
+                       u8 gre_s_present[0x1];
+                       u8 reserved_at_4[0x9];
+                       u8 version[0x3];
+               };
+       };
+       u8 gre_protocol[0x10];
+       u8 checksum[0x10];
+       u8 reserved_at_30[0x10];
+};
+
+/* GENEVE header (RFC 8926); first 16 bits alias version/opt_len/O/C. */
+struct mlx5_ifc_header_geneve_bits {
+       union {
+               u8 ver_opt_len_o_c_rsvd[0x10];
+               struct {
+                       u8 version[0x2];
+                       u8 opt_len[0x6];
+                       u8 o_flag[0x1];
+                       u8 c_flag[0x1];
+                       u8 reserved_at_a[0x6];
+               };
+       };
+       u8 protocol_type[0x10];
+       u8 vni[0x18];
+       u8 reserved_at_38[0x8];
+};
+
+/* GENEVE TLV option header: option class, type and length. */
+struct mlx5_ifc_header_geneve_opt_bits {
+       u8 class[0x10];
+       u8 type[0x8];
+       u8 reserved[0x3];
+       u8 len[0x5];
+};
+
+/* ICMP header: each dword aliases its field breakdown via a union
+ * (dw1 = type/code/checksum, dw2 = identifier/sequence number).
+ */
+struct mlx5_ifc_header_icmp_bits {
+       union {
+               u8 icmp_dw1[0x20];
+               struct {
+                       u8 type[0x8];
+                       u8 code[0x8];
+                       u8 cksum[0x10];
+               };
+       };
+       union {
+               u8 icmp_dw2[0x20];
+               struct {
+                       u8 ident[0x10];
+                       u8 seq_nb[0x10];
+               };
+       };
+};
+
+/* A definer: the dword/byte selectors and the match mask that together
+ * describe which packet fields a matcher's STEs compare against, plus
+ * the FW object id allocated for it.
+ */
+struct mlx5hws_definer {
+       enum mlx5hws_definer_type type;
+       u8 dw_selector[DW_SELECTORS];
+       u8 byte_selector[BYTE_SELECTORS];
+       struct mlx5hws_rule_match_tag mask;
+       u32 obj_id;
+};
+
+/* Per-context cache of definers, so identical definers share one
+ * FW object (see mlx5hws_definer_cache_item below).
+ */
+struct mlx5hws_definer_cache {
+       struct list_head list_head;
+};
+
+/* One refcounted entry in the definer cache list. */
+struct mlx5hws_definer_cache_item {
+       struct mlx5hws_definer definer;
+       u32 refcount;
+       struct list_node;  NOTE: see original
+       struct list_head list_node;
+};
+
+/* True if the definer uses the jumbo format (larger match tag). */
+static inline bool
+mlx5hws_definer_is_jumbo(struct mlx5hws_definer *definer)
+{
+       return (definer->type == MLX5HWS_DEFINER_TYPE_JUMBO);
+}
+
+/* Build an STE tag from a match parameter using the field-copy list. */
+void mlx5hws_definer_create_tag(u32 *match_param,
+                               struct mlx5hws_definer_fc *fc,
+                               u32 fc_sz,
+                               u8 *tag);
+
+/* Return the FW object id of an already-created definer. */
+int mlx5hws_definer_get_id(struct mlx5hws_definer *definer);
+
+/* Allocate/free the definer(s) backing a match template. */
+int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
+                           struct mlx5hws_match_template *mt);
+
+void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx,
+                              struct mlx5hws_match_template *mt);
+
+/* Create/destroy the per-context definer cache. */
+int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache);
+
+void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache);
+
+/* Compare two definers; used for cache lookups. */
+int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
+                           struct mlx5hws_definer *definer_b);
+
+/* Get (possibly cached) / put the FW object for a definer. */
+int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
+                           struct mlx5hws_definer *definer);
+
+void mlx5hws_definer_free(struct mlx5hws_context *ctx,
+                         struct mlx5hws_definer *definer);
+
+/* Compute selectors/mask layout for a match template's definer. */
+int mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
+                               struct mlx5hws_match_template *mt,
+                               struct mlx5hws_definer *match_definer);
+
+/* Convert match params to a compressed field-copy array (kvfree'd by
+ * the caller); *fc_sz returns the number of entries.
+ */
+struct mlx5hws_definer_fc *
+mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
+                                                  u8 match_criteria_enable,
+                                                  u32 *match_param,
+                                                  int *fc_sz);
+
+#endif /* HWS_DEFINER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
new file mode 100644 (file)
index 0000000..3c8635f
--- /dev/null
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_INTERNAL_H_
+#define HWS_INTERNAL_H_
+
+#include <linux/mlx5/transobj.h>
+#include <linux/mlx5/vport.h>
+#include "fs_core.h"
+#include "wq.h"
+#include "lib/mlx5.h"
+
+#include "prm.h"
+#include "mlx5hws.h"
+#include "pool.h"
+#include "vport.h"
+#include "context.h"
+#include "table.h"
+#include "send.h"
+#include "rule.h"
+#include "cmd.h"
+#include "action.h"
+#include "definer.h"
+#include "matcher.h"
+#include "debug.h"
+#include "pat_arg.h"
+#include "bwc.h"
+#include "bwc_complex.h"
+
+/* Basic unit sizes, in bytes (W = word, DW = dword). */
+#define W_SIZE         2
+#define DW_SIZE                4
+#define BITS_IN_BYTE   8
+#define BITS_IN_DW     (BITS_IN_BYTE * DW_SIZE)
+
+#define IS_BIT_SET(_value, _bit) ((_value) & (1ULL << (_bit)))
+
+/* Logging helpers that route through the context's mlx5_core device */
+#define mlx5hws_err(ctx, arg...) mlx5_core_err((ctx)->mdev, ##arg)
+#define mlx5hws_info(ctx, arg...) mlx5_core_info((ctx)->mdev, ##arg)
+#define mlx5hws_dbg(ctx, arg...) mlx5_core_dbg((ctx)->mdev, ##arg)
+
+#define MLX5HWS_TABLE_TYPE_BASE 2
+/* Index used when any action-STE set (0 or 1) is equally valid */
+#define MLX5HWS_ACTION_STE_IDX_ANY 0
+
+/* Return true if all @size bytes of @mem are zero.
+ * Checks the first byte, then compares the buffer against itself
+ * shifted by one byte: if mem[0] == 0 and mem[i] == mem[i + 1] for all
+ * i, every byte must be zero. A zero size is treated as "all zero" but
+ * warned about, since callers are expected to pass a real buffer.
+ */
+static inline bool is_mem_zero(const u8 *mem, size_t size)
+{
+       if (unlikely(!size)) {
+               pr_warn("HWS: invalid buffer of size 0 in %s\n", __func__);
+               return true;
+       }
+
+       return (*mem == 0) && memcmp(mem, mem + 1, size - 1) == 0;
+}
+
+/* Round @val up to a multiple of @align.
+ * @align must be a power of two (the mask trick relies on it).
+ */
+static inline unsigned long align(unsigned long val, unsigned long align)
+{
+       return (val + align - 1) & ~(align - 1);
+}
+
+#endif /* HWS_INTERNAL_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
new file mode 100644 (file)
index 0000000..1bb3a6f
--- /dev/null
@@ -0,0 +1,1216 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+/* The two kinds of RTC a matcher owns: the match RTC (lookup on packet
+ * fields) and the action-STE array RTC (extra STEs for long action
+ * chains). MAX doubles as the "unknown" sentinel for logging.
+ */
+enum mlx5hws_matcher_rtc_type {
+       HWS_MATCHER_RTC_TYPE_MATCH,
+       HWS_MATCHER_RTC_TYPE_STE_ARRAY,
+       HWS_MATCHER_RTC_TYPE_MAX,
+};
+
+/* Human-readable names for enum mlx5hws_matcher_rtc_type (log output) */
+static const char * const mlx5hws_matcher_rtc_type_str[] = {
+       [HWS_MATCHER_RTC_TYPE_MATCH] = "MATCH",
+       [HWS_MATCHER_RTC_TYPE_STE_ARRAY] = "STE_ARRAY",
+       [HWS_MATCHER_RTC_TYPE_MAX] = "UNKNOWN",
+};
+
+/* Map an RTC type to its log string; out-of-range values become
+ * "UNKNOWN" (the MAX slot) rather than indexing past the array.
+ */
+static const char *hws_matcher_rtc_type_to_str(enum mlx5hws_matcher_rtc_type rtc_type)
+{
+       if (rtc_type > HWS_MATCHER_RTC_TYPE_MAX)
+               rtc_type = HWS_MATCHER_RTC_TYPE_MAX;
+       return mlx5hws_matcher_rtc_type_str[rtc_type];
+}
+
+/* True when the matcher is large enough to need a collision table. */
+static bool hws_matcher_requires_col_tbl(u8 log_num_of_rules)
+{
+       /* Collision table concatenation is done only for large rule tables */
+       return log_num_of_rules > MLX5HWS_MATCHER_ASSURED_RULES_TH;
+}
+
+/* Pick the table depth (log of columns) for a given log rule count:
+ * large tables use the fixed main-table depth and rely on a collision
+ * table; small tables get one deep table instead.
+ */
+static u8 hws_matcher_rules_to_tbl_depth(u8 log_num_of_rules)
+{
+       if (hws_matcher_requires_col_tbl(log_num_of_rules))
+               return MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH;
+
+       /* For small rule tables we use a single deep table to assure insertion */
+       return min(log_num_of_rules, MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH);
+}
+
+/* Destroy the matcher's end flow table (pairs with create_end_ft). */
+static void hws_matcher_destroy_end_ft(struct mlx5hws_matcher *matcher)
+{
+       mlx5hws_table_destroy_default_ft(matcher->tbl, matcher->end_ft_id);
+}
+
+/* Create the matcher's end flow table - the FT that missed/finished
+ * packets continue to. Returns 0 or a negative errno.
+ */
+static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher)
+{
+       struct mlx5hws_table *tbl = matcher->tbl;
+       int ret;
+
+       ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &matcher->end_ft_id);
+       if (ret) {
+               mlx5hws_err(tbl->ctx, "Failed to create matcher end flow table\n");
+               return ret;
+       }
+       return 0;
+}
+
+/* Insert @matcher into its table's priority-ordered matcher list and
+ * wire up the steering chain: the new matcher's end FT points at the
+ * next matcher's RTCs (or the table's miss table if it is last), and
+ * the previous FT (prev matcher's end FT, or the table's root FT if it
+ * is first) points at the new matcher's RTCs. On any failure the
+ * matcher is removed from the list again; FT links already rewritten
+ * are left for the caller/teardown path.
+ */
+static int hws_matcher_connect(struct mlx5hws_matcher *matcher)
+{
+       struct mlx5hws_table *tbl = matcher->tbl;
+       struct mlx5hws_context *ctx = tbl->ctx;
+       struct mlx5hws_matcher *prev = NULL;
+       struct mlx5hws_matcher *next = NULL;
+       struct mlx5hws_matcher *tmp_matcher;
+       int ret;
+
+       /* Find location in matcher list (sorted by ascending priority) */
+       if (list_empty(&tbl->matchers_list)) {
+               list_add(&matcher->list_node, &tbl->matchers_list);
+               goto connect;
+       }
+
+       list_for_each_entry(tmp_matcher, &tbl->matchers_list, list_node) {
+               if (tmp_matcher->attr.priority > matcher->attr.priority) {
+                       next = tmp_matcher;
+                       break;
+               }
+               prev = tmp_matcher;
+       }
+
+       if (next)
+               /* insert before next */
+               list_add_tail(&matcher->list_node, &next->list_node);
+       else
+               /* insert after prev */
+               list_add(&matcher->list_node, &prev->list_node);
+
+connect:
+       if (next) {
+               /* Connect to next RTC */
+               ret = mlx5hws_table_ft_set_next_rtc(ctx,
+                                                   matcher->end_ft_id,
+                                                   tbl->fw_ft_type,
+                                                   next->match_ste.rtc_0_id,
+                                                   next->match_ste.rtc_1_id);
+               if (ret) {
+                       mlx5hws_err(ctx, "Failed to connect new matcher to next RTC\n");
+                       goto remove_from_list;
+               }
+       } else {
+               /* Connect last matcher to next miss_tbl if exists */
+               ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
+               if (ret) {
+                       mlx5hws_err(ctx, "Failed connect new matcher to miss_tbl\n");
+                       goto remove_from_list;
+               }
+       }
+
+       /* Connect to previous FT */
+       ret = mlx5hws_table_ft_set_next_rtc(ctx,
+                                           prev ? prev->end_ft_id : tbl->ft_id,
+                                           tbl->fw_ft_type,
+                                           matcher->match_ste.rtc_0_id,
+                                           matcher->match_ste.rtc_1_id);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to connect new matcher to previous FT\n");
+               goto remove_from_list;
+       }
+
+       /* Reset prev matcher FT default miss (drop refcount) */
+       ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev ? prev->end_ft_id : tbl->ft_id);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to reset matcher ft default miss\n");
+               goto remove_from_list;
+       }
+
+       if (!prev) {
+               /* New first matcher: tables whose miss points at this
+                * table must be re-pointed at the new first matcher.
+                */
+               ret = mlx5hws_table_update_connected_miss_tables(tbl);
+               if (ret) {
+                       mlx5hws_err(ctx, "Fatal error, failed to update connected miss table\n");
+                       goto remove_from_list;
+               }
+       }
+
+       return 0;
+
+remove_from_list:
+       list_del_init(&matcher->list_node);
+       return ret;
+}
+
+/* Remove @matcher from the table's chain: unlink it from the matcher
+ * list and re-point the previous FT (prev matcher's end FT, or the
+ * table root FT) at the next matcher's RTCs, or at the table's miss
+ * table if this was the last matcher. On failure the matcher is put
+ * back in its old list position and the error is returned.
+ */
+static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
+{
+       struct mlx5hws_matcher *next = NULL, *prev = NULL;
+       struct mlx5hws_table *tbl = matcher->tbl;
+       u32 prev_ft_id = tbl->ft_id;
+       int ret;
+
+       if (!list_is_first(&matcher->list_node, &tbl->matchers_list)) {
+               prev = list_prev_entry(matcher, list_node);
+               prev_ft_id = prev->end_ft_id;
+       }
+
+       if (!list_is_last(&matcher->list_node, &tbl->matchers_list))
+               next = list_next_entry(matcher, list_node);
+
+       list_del_init(&matcher->list_node);
+
+       if (next) {
+               /* Connect previous end FT to next RTC */
+               ret = mlx5hws_table_ft_set_next_rtc(tbl->ctx,
+                                                   prev_ft_id,
+                                                   tbl->fw_ft_type,
+                                                   next->match_ste.rtc_0_id,
+                                                   next->match_ste.rtc_1_id);
+               if (ret) {
+                       mlx5hws_err(tbl->ctx, "Failed to disconnect matcher\n");
+                       goto matcher_reconnect;
+               }
+       } else {
+               ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
+               if (ret) {
+                       mlx5hws_err(tbl->ctx, "Failed to disconnect last matcher\n");
+                       goto matcher_reconnect;
+               }
+       }
+
+       /* Removing first matcher, update connected miss tables if exists */
+       if (prev_ft_id == tbl->ft_id) {
+               ret = mlx5hws_table_update_connected_miss_tables(tbl);
+               if (ret) {
+                       mlx5hws_err(tbl->ctx, "Fatal error, failed to update connected miss table\n");
+                       goto matcher_reconnect;
+               }
+       }
+
+       ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev_ft_id);
+       if (ret) {
+               mlx5hws_err(tbl->ctx, "Fatal error, failed to restore matcher ft default miss\n");
+               goto matcher_reconnect;
+       }
+
+       return 0;
+
+matcher_reconnect:
+       /* Undo the list removal so the caller sees consistent state */
+       if (list_empty(&tbl->matchers_list) || !prev)
+               list_add(&matcher->list_node, &tbl->matchers_list);
+       else
+               /* insert after prev matcher */
+               list_add(&matcher->list_node, &prev->list_node);
+
+       return ret;
+}
+
+/* Fill the RTC size attributes. When the matcher is known to see
+ * traffic from only one direction (vport-only or wire-only FDB flows),
+ * the RTC for the unused direction is shrunk to size/depth 0.
+ */
+static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
+                                       struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
+                                       enum mlx5hws_matcher_rtc_type rtc_type,
+                                       bool is_mirror)
+{
+       struct mlx5hws_pool_chunk *ste = &matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].ste;
+       enum mlx5hws_matcher_flow_src flow_src = matcher->attr.optimize_flow_src;
+       bool is_match_rtc = rtc_type == HWS_MATCHER_RTC_TYPE_MATCH;
+
+       if ((flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT && !is_mirror) ||
+           (flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE && is_mirror)) {
+               /* Optimize FDB RTC */
+               rtc_attr->log_size = 0;
+               rtc_attr->log_depth = 0;
+       } else {
+               /* Keep original values */
+               rtc_attr->log_size = is_match_rtc ? matcher->attr.table.sz_row_log : ste->order;
+               rtc_attr->log_depth = is_match_rtc ? matcher->attr.table.sz_col_log : 0;
+       }
+}
+
+/* Create the RTC(s) for @matcher: either the match RTC or one of the
+ * action-STE array RTCs (selected by @action_ste_selector). For FDB
+ * tables a second, mirror RTC (rtc_1) is created as well. Returns 0 or
+ * a negative errno; on failure any RTC/STE chunk allocated here is
+ * released again.
+ */
+static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
+                                 enum mlx5hws_matcher_rtc_type rtc_type,
+                                 u8 action_ste_selector)
+{
+       struct mlx5hws_matcher_attr *attr = &matcher->attr;
+       struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
+       struct mlx5hws_match_template *mt = matcher->mt;
+       struct mlx5hws_context *ctx = matcher->tbl->ctx;
+       struct mlx5hws_action_default_stc *default_stc;
+       struct mlx5hws_matcher_action_ste *action_ste;
+       struct mlx5hws_table *tbl = matcher->tbl;
+       struct mlx5hws_pool *ste_pool, *stc_pool;
+       struct mlx5hws_pool_chunk *ste;
+       u32 *rtc_0_id, *rtc_1_id;
+       u32 obj_id;
+       int ret;
+
+       switch (rtc_type) {
+       case HWS_MATCHER_RTC_TYPE_MATCH:
+               rtc_0_id = &matcher->match_ste.rtc_0_id;
+               rtc_1_id = &matcher->match_ste.rtc_1_id;
+               ste_pool = matcher->match_ste.pool;
+               ste = &matcher->match_ste.ste;
+               ste->order = attr->table.sz_col_log + attr->table.sz_row_log;
+
+               rtc_attr.log_size = attr->table.sz_row_log;
+               rtc_attr.log_depth = attr->table.sz_col_log;
+               rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+               rtc_attr.is_scnd_range = 0;
+               rtc_attr.miss_ft_id = matcher->end_ft_id;
+
+               if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) {
+                       /* The usual Hash Table */
+                       rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
+
+                       /* The first mt is used since all share the same definer */
+                       rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
+               } else if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) {
+                       rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+                       rtc_attr.num_hash_definer = 1;
+
+                       if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+                               /* Hash Split Table */
+                               rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
+                               rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
+                       } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
+                               /* Linear Lookup Table */
+                               rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR;
+                               rtc_attr.match_definer_0 = ctx->caps->linear_match_definer;
+                       }
+               }
+
+               /* Match pool requires implicit allocation */
+               ret = mlx5hws_pool_chunk_alloc(ste_pool, ste);
+               if (ret) {
+                       mlx5hws_err(ctx, "Failed to allocate STE for %s RTC",
+                                   hws_matcher_rtc_type_to_str(rtc_type));
+                       return ret;
+               }
+               break;
+
+       case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
+               action_ste = &matcher->action_ste[action_ste_selector];
+
+               rtc_0_id = &action_ste->rtc_0_id;
+               rtc_1_id = &action_ste->rtc_1_id;
+               ste_pool = action_ste->pool;
+               ste = &action_ste->ste;
+               /* Room for max_stes action STEs per rule row */
+               ste->order = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
+                            attr->table.sz_row_log;
+               rtc_attr.log_size = ste->order;
+               rtc_attr.log_depth = 0;
+               rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+               /* The action STEs use the default always hit definer */
+               rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
+               rtc_attr.is_frst_jumbo = false;
+               rtc_attr.miss_ft_id = 0;
+               break;
+
+       default:
+               mlx5hws_err(ctx, "HWS Invalid RTC type\n");
+               return -EINVAL;
+       }
+
+       obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+
+       rtc_attr.pd = ctx->pd_num;
+       rtc_attr.ste_base = obj_id;
+       rtc_attr.ste_offset = ste->offset;
+       rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
+       rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, false);
+       hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, false);
+
+       /* STC is a single resource (obj_id), use any STC for the ID */
+       stc_pool = ctx->stc_pool[tbl->type];
+       default_stc = ctx->common_res[tbl->type].default_stc;
+       obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
+       rtc_attr.stc_base = obj_id;
+
+       ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to create matcher RTC of type %s",
+                           hws_matcher_rtc_type_to_str(rtc_type));
+               goto free_ste;
+       }
+
+       if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+               /* FDB: create the mirror (rtc_1) side with mirror IDs */
+               obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+               rtc_attr.ste_base = obj_id;
+               rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, true);
+
+               obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
+               rtc_attr.stc_base = obj_id;
+               hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, true);
+
+               ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
+               if (ret) {
+                       mlx5hws_err(ctx, "Failed to create peer matcher RTC of type %s",
+                                   hws_matcher_rtc_type_to_str(rtc_type));
+                       goto destroy_rtc_0;
+               }
+       }
+
+       return 0;
+
+destroy_rtc_0:
+       mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id);
+free_ste:
+       /* Only the match path allocated an STE chunk above */
+       if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
+               mlx5hws_pool_chunk_free(ste_pool, ste);
+       return ret;
+}
+
+/* Destroy the RTC(s) created by hws_matcher_create_rtc for the given
+ * type/selector, including the FDB mirror RTC and, for the match type,
+ * the implicitly allocated STE chunk.
+ */
+static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher,
+                                   enum mlx5hws_matcher_rtc_type rtc_type,
+                                   u8 action_ste_selector)
+{
+       struct mlx5hws_matcher_action_ste *action_ste;
+       struct mlx5hws_table *tbl = matcher->tbl;
+       struct mlx5hws_pool_chunk *ste;
+       struct mlx5hws_pool *ste_pool;
+       u32 rtc_0_id, rtc_1_id;
+
+       switch (rtc_type) {
+       case HWS_MATCHER_RTC_TYPE_MATCH:
+               rtc_0_id = matcher->match_ste.rtc_0_id;
+               rtc_1_id = matcher->match_ste.rtc_1_id;
+               ste_pool = matcher->match_ste.pool;
+               ste = &matcher->match_ste.ste;
+               break;
+       case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
+               action_ste = &matcher->action_ste[action_ste_selector];
+               rtc_0_id = action_ste->rtc_0_id;
+               rtc_1_id = action_ste->rtc_1_id;
+               ste_pool = action_ste->pool;
+               ste = &action_ste->ste;
+               break;
+       default:
+               return;
+       }
+
+       if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+               mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_1_id);
+
+       mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_0_id);
+       if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
+               mlx5hws_pool_chunk_free(ste_pool, ste);
+}
+
+/* Validate the matcher's requested size against device caps: column
+ * depth, total STE allocation maximum, and allocation granularity.
+ * Returns 0 or -EOPNOTSUPP.
+ */
+static int
+hws_matcher_check_attr_sz(struct mlx5hws_cmd_query_caps *caps,
+                         struct mlx5hws_matcher *matcher)
+{
+       struct mlx5hws_matcher_attr *attr = &matcher->attr;
+
+       if (attr->table.sz_col_log > caps->rtc_log_depth_max) {
+               mlx5hws_err(matcher->tbl->ctx, "Matcher depth exceeds limit %d\n",
+                           caps->rtc_log_depth_max);
+               return -EOPNOTSUPP;
+       }
+
+       if (attr->table.sz_col_log + attr->table.sz_row_log > caps->ste_alloc_log_max) {
+               mlx5hws_err(matcher->tbl->ctx, "Total matcher size exceeds limit %d\n",
+                           caps->ste_alloc_log_max);
+               return -EOPNOTSUPP;
+       }
+
+       if (attr->table.sz_col_log + attr->table.sz_row_log < caps->ste_alloc_log_gran) {
+               mlx5hws_err(matcher->tbl->ctx, "Total matcher size below limit %d\n",
+                           caps->ste_alloc_log_gran);
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+/* Propagate the matcher's flow-source optimization to the pool attr:
+ * vport-only flows keep only the original side, wire-only flows keep
+ * only the mirror side; otherwise leave the pool attr untouched.
+ */
+static void hws_matcher_set_pool_attr(struct mlx5hws_pool_attr *attr,
+                                     struct mlx5hws_matcher *matcher)
+{
+       switch (matcher->attr.optimize_flow_src) {
+       case MLX5HWS_MATCHER_FLOW_SRC_VPORT:
+               attr->opt_type = MLX5HWS_POOL_OPTIMIZE_ORIG;
+               break;
+       case MLX5HWS_MATCHER_FLOW_SRC_WIRE:
+               attr->opt_type = MLX5HWS_POOL_OPTIMIZE_MIRROR;
+               break;
+       default:
+               break;
+       }
+}
+
+/* Validate an action template's action combination for this table type
+ * and process it into setters. Returns 0, -EINVAL for an invalid
+ * combination, or the error from template processing.
+ */
+static int hws_matcher_check_and_process_at(struct mlx5hws_matcher *matcher,
+                                           struct mlx5hws_action_template *at)
+{
+       struct mlx5hws_context *ctx = matcher->tbl->ctx;
+       bool valid;
+       int ret;
+
+       valid = mlx5hws_action_check_combo(ctx, at->action_type_arr, matcher->tbl->type);
+       if (!valid) {
+               mlx5hws_err(ctx, "Invalid combination in action template\n");
+               return -EINVAL;
+       }
+
+       /* Process action template to setters */
+       ret = mlx5hws_action_template_process(at);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to process action template\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+/* Start a resize: snapshot the source matcher's action-STE resources
+ * (STCs, RTC ids, pools) into a resize_data node owned by the resize
+ * destination, so they stay alive until old rules finish migrating.
+ * Any resize_data nodes accumulated on the source from earlier resizes
+ * are moved to the destination as well. Returns 0 or -ENOMEM.
+ */
+static int hws_matcher_resize_init(struct mlx5hws_matcher *src_matcher)
+{
+       struct mlx5hws_matcher_resize_data *resize_data;
+
+       resize_data = kzalloc(sizeof(*resize_data), GFP_KERNEL);
+       if (!resize_data)
+               return -ENOMEM;
+
+       resize_data->max_stes = src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
+
+       /* A pool is recorded only if that action-STE set was in use */
+       resize_data->action_ste[0].stc = src_matcher->action_ste[0].stc;
+       resize_data->action_ste[0].rtc_0_id = src_matcher->action_ste[0].rtc_0_id;
+       resize_data->action_ste[0].rtc_1_id = src_matcher->action_ste[0].rtc_1_id;
+       resize_data->action_ste[0].pool = src_matcher->action_ste[0].max_stes ?
+                                         src_matcher->action_ste[0].pool :
+                                         NULL;
+       resize_data->action_ste[1].stc = src_matcher->action_ste[1].stc;
+       resize_data->action_ste[1].rtc_0_id = src_matcher->action_ste[1].rtc_0_id;
+       resize_data->action_ste[1].rtc_1_id = src_matcher->action_ste[1].rtc_1_id;
+       resize_data->action_ste[1].pool = src_matcher->action_ste[1].max_stes ?
+                                         src_matcher->action_ste[1].pool :
+                                          NULL;
+
+       /* Place the new resized matcher on the dst matcher's list */
+       list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
+
+       /* Move all the previous resized matchers to the dst matcher's list */
+       while (!list_empty(&src_matcher->resize_data)) {
+               resize_data = list_first_entry(&src_matcher->resize_data,
+                                              struct mlx5hws_matcher_resize_data,
+                                              list_node);
+               list_del_init(&resize_data->list_node);
+               list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
+       }
+
+       return 0;
+}
+
+/* Finish a resize: release every resize_data snapshot held by
+ * @matcher - free the saved STCs, destroy the saved RTCs (mirror RTCs
+ * first for FDB), destroy the saved pools, and free the node. No-op
+ * for non-resizable matchers.
+ */
+static void hws_matcher_resize_uninit(struct mlx5hws_matcher *matcher)
+{
+       struct mlx5hws_matcher_resize_data *resize_data;
+
+       if (!mlx5hws_matcher_is_resizable(matcher))
+               return;
+
+       while (!list_empty(&matcher->resize_data)) {
+               resize_data = list_first_entry(&matcher->resize_data,
+                                              struct mlx5hws_matcher_resize_data,
+                                              list_node);
+               list_del_init(&resize_data->list_node);
+
+               if (resize_data->max_stes) {
+                       mlx5hws_action_free_single_stc(matcher->tbl->ctx,
+                                                      matcher->tbl->type,
+                                                      &resize_data->action_ste[1].stc);
+                       mlx5hws_action_free_single_stc(matcher->tbl->ctx,
+                                                      matcher->tbl->type,
+                                                      &resize_data->action_ste[0].stc);
+
+                       if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+                               mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+                                                       resize_data->action_ste[1].rtc_1_id);
+                               mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+                                                       resize_data->action_ste[0].rtc_1_id);
+                       }
+                       mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+                                               resize_data->action_ste[1].rtc_0_id);
+                       mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+                                               resize_data->action_ste[0].rtc_0_id);
+                       /* Pools were saved only if the set was in use */
+                       if (resize_data->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].pool) {
+                               mlx5hws_pool_destroy(resize_data->action_ste[1].pool);
+                               mlx5hws_pool_destroy(resize_data->action_ste[0].pool);
+                       }
+               }
+
+               kfree(resize_data);
+       }
+}
+
+/* Set up one of the matcher's action STE sets (@action_ste_selector):
+ * allocate its STE pool, create the action RTC, and allocate an STC used
+ * to jump from the match STE into the action STEs.
+ * Returns 0 on success or a negative errno; on failure everything
+ * allocated here is unwound.
+ */
+static int
+hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
+{
+       struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+       struct mlx5hws_matcher_action_ste *action_ste;
+       struct mlx5hws_table *tbl = matcher->tbl;
+       struct mlx5hws_pool_attr pool_attr = {0};
+       struct mlx5hws_context *ctx = tbl->ctx;
+       int ret;
+
+       action_ste = &matcher->action_ste[action_ste_selector];
+
+       /* Allocate action STE mempool */
+       pool_attr.table_type = tbl->type;
+       pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+       pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
+       /* Pool log-size: per-rule action STEs (rounded up to a power of two)
+        * times the number of table rows.
+        */
+       pool_attr.alloc_log_sz = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
+                                matcher->attr.table.sz_row_log;
+       hws_matcher_set_pool_attr(&pool_attr, matcher);
+       action_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
+       if (!action_ste->pool) {
+               mlx5hws_err(ctx, "Failed to create action ste pool\n");
+               return -EINVAL;
+       }
+
+       /* Allocate action RTC */
+       ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to create action RTC\n");
+               goto free_ste_pool;
+       }
+
+       /* Allocate STC for jumps to STE */
+       stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+       stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
+       stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+       stc_attr.ste_table.ste = action_ste->ste;
+       stc_attr.ste_table.ste_pool = action_ste->pool;
+       stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
+
+       ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl->type,
+                                             &action_ste->stc);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to create action jump to table STC\n");
+               goto free_rtc;
+       }
+
+       return 0;
+
+free_rtc:
+       hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+free_ste_pool:
+       mlx5hws_pool_destroy(action_ste->pool);
+       return ret;
+}
+
+/* Tear down one action STE set; counterpart of hws_matcher_bind_at_idx().
+ * Skipped when the set holds no action STEs, for collision matchers, or
+ * while the matcher is in the middle of a resize.
+ */
+static void hws_matcher_unbind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
+{
+       struct mlx5hws_matcher_action_ste *action_ste;
+       struct mlx5hws_table *tbl = matcher->tbl;
+
+       action_ste = &matcher->action_ste[action_ste_selector];
+
+       if (!action_ste->max_stes ||
+           matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION ||
+           mlx5hws_matcher_is_in_resize(matcher))
+               return;
+
+       /* Reverse order of bind: STC, then RTC, then the STE pool */
+       mlx5hws_action_free_single_stc(tbl->ctx, tbl->type, &action_ste->stc);
+       hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+       mlx5hws_pool_destroy(action_ste->pool);
+}
+
+/* Validate all action templates and compute the worst-case number of action
+ * STEs any rule may need; when non-zero, set up both action STE sets.
+ * Collision matchers carry no actions of their own and return immediately.
+ */
+static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
+{
+       bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
+       struct mlx5hws_table *tbl = matcher->tbl;
+       struct mlx5hws_context *ctx = tbl->ctx;
+       u32 required_stes;
+       u8 max_stes = 0;
+       int i, ret;
+
+       if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)
+               return 0;
+
+       for (i = 0; i < matcher->num_of_at; i++) {
+               struct mlx5hws_action_template *at = &matcher->at[i];
+
+               ret = hws_matcher_check_and_process_at(matcher, at);
+               if (ret) {
+                       mlx5hws_err(ctx, "Invalid at %d", i);
+                       return ret;
+               }
+
+               /* One STE is saved when the template is not jumbo or is
+                * terminate-only - presumably that action packs into the
+                * match STE itself; confirm against the STE layout docs.
+                */
+               required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
+               max_stes = max(max_stes, required_stes);
+
+               /* Future: Optimize reparse */
+       }
+
+       /* There are no additional STEs required for matcher */
+       if (!max_stes)
+               return 0;
+
+       /* Both action STE sets are sized for the worst-case template */
+       matcher->action_ste[0].max_stes = max_stes;
+       matcher->action_ste[1].max_stes = max_stes;
+
+       ret = hws_matcher_bind_at_idx(matcher, 0);
+       if (ret)
+               return ret;
+
+       ret = hws_matcher_bind_at_idx(matcher, 1);
+       if (ret)
+               goto free_at_0;
+
+       return 0;
+
+free_at_0:
+       hws_matcher_unbind_at_idx(matcher, 0);
+       return ret;
+}
+
+/* Tear down both action STE sets, in reverse order of binding */
+static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher)
+{
+       hws_matcher_unbind_at_idx(matcher, 1);
+       hws_matcher_unbind_at_idx(matcher, 0);
+}
+
+/* Bind the match templates: compute their definers (skipped for collision
+ * matchers, which reuse the parent's templates) and allocate the matcher's
+ * match STE pool, sized for the full rows-by-columns table.
+ */
+static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
+{
+       struct mlx5hws_context *ctx = matcher->tbl->ctx;
+       struct mlx5hws_pool_attr pool_attr = {0};
+       int ret;
+
+       /* Calculate match, range and hash definers */
+       if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)) {
+               ret = mlx5hws_definer_mt_init(ctx, matcher->mt);
+               if (ret) {
+                       if (ret == -E2BIG)
+                               mlx5hws_err(ctx, "Failed to set matcher templates with match definers\n");
+                       return ret;
+               }
+       }
+
+       /* Create an STE pool per matcher*/
+       pool_attr.table_type = matcher->tbl->type;
+       pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+       pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL;
+       pool_attr.alloc_log_sz = matcher->attr.table.sz_col_log +
+                                matcher->attr.table.sz_row_log;
+       hws_matcher_set_pool_attr(&pool_attr, matcher);
+
+       matcher->match_ste.pool = mlx5hws_pool_create(ctx, &pool_attr);
+       if (!matcher->match_ste.pool) {
+               mlx5hws_err(ctx, "Failed to allocate matcher STE pool\n");
+               /* NOTE(review): -EOPNOTSUPP for a pool allocation failure looks
+                * odd; -ENOMEM may be more accurate - confirm intent.
+                */
+               ret = -EOPNOTSUPP;
+               goto uninit_match_definer;
+       }
+
+       return 0;
+
+uninit_match_definer:
+       if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
+               mlx5hws_definer_mt_uninit(ctx, matcher->mt);
+       return ret;
+}
+
+/* Counterpart of hws_matcher_bind_mt(): free the match STE pool and, for
+ * non-collision matchers, the definers computed for the match templates.
+ */
+static void hws_matcher_unbind_mt(struct mlx5hws_matcher *matcher)
+{
+       mlx5hws_pool_destroy(matcher->match_ste.pool);
+       if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
+               mlx5hws_definer_mt_uninit(matcher->tbl->ctx, matcher->mt);
+}
+
+/* Validate the matcher's insert/distribute mode combination against the
+ * device capabilities. Returns 0 when supported, -EOPNOTSUPP otherwise.
+ */
+static int
+hws_matcher_validate_insert_mode(struct mlx5hws_cmd_query_caps *caps,
+                                struct mlx5hws_matcher *matcher)
+{
+       struct mlx5hws_matcher_attr *attr = &matcher->attr;
+       struct mlx5hws_context *ctx = matcher->tbl->ctx;
+
+       switch (attr->insert_mode) {
+       case MLX5HWS_MATCHER_INSERT_BY_HASH:
+               /* Hash insertion only pairs with hash distribution */
+               if (matcher->attr.distribute_mode != MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+                       mlx5hws_err(ctx, "Invalid matcher distribute mode\n");
+                       return -EOPNOTSUPP;
+               }
+               break;
+
+       case MLX5HWS_MATCHER_INSERT_BY_INDEX:
+               /* Index insertion implies a single-column (Nx1) table */
+               if (attr->table.sz_col_log) {
+                       mlx5hws_err(ctx, "Matcher with INSERT_BY_INDEX supports only Nx1 table size\n");
+                       return -EOPNOTSUPP;
+               }
+
+               if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+                       /* Hash Split Table */
+                       if (!caps->rtc_hash_split_table) {
+                               mlx5hws_err(ctx, "FW doesn't support insert by index and hash distribute\n");
+                               return -EOPNOTSUPP;
+                       }
+               } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
+                       /* Linear Lookup Table */
+                       if (!caps->rtc_linear_lookup_table ||
+                           !IS_BIT_SET(caps->access_index_mode,
+                                       MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR)) {
+                               mlx5hws_err(ctx, "FW doesn't support insert by index and linear distribute\n");
+                               return -EOPNOTSUPP;
+                       }
+
+                       if (attr->table.sz_row_log > MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX) {
+                               mlx5hws_err(ctx, "Matcher with linear distribute: rows exceed limit %d",
+                                           MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX);
+                               return -EOPNOTSUPP;
+                       }
+               } else {
+                       mlx5hws_err(ctx, "Matcher has unsupported distribute mode\n");
+                       return -EOPNOTSUPP;
+               }
+               break;
+
+       default:
+               mlx5hws_err(ctx, "Matcher has unsupported insert mode\n");
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+/* Validate and normalize the matcher attributes: check insert mode and
+ * domain restrictions, derive the table depth from the requested rule
+ * count, and latch the resizable flag. Returns 0 or -EOPNOTSUPP.
+ */
+static int
+hws_matcher_process_attr(struct mlx5hws_cmd_query_caps *caps,
+                        struct mlx5hws_matcher *matcher)
+{
+       struct mlx5hws_matcher_attr *attr = &matcher->attr;
+
+       if (hws_matcher_validate_insert_mode(caps, matcher))
+               return -EOPNOTSUPP;
+
+       /* optimize_flow_src is an FDB-only knob */
+       if (matcher->tbl->type != MLX5HWS_TABLE_TYPE_FDB && attr->optimize_flow_src) {
+               mlx5hws_err(matcher->tbl->ctx, "NIC domain doesn't support flow_src\n");
+               return -EOPNOTSUPP;
+       }
+
+       /* Convert number of rules to the required depth */
+       if (attr->mode == MLX5HWS_MATCHER_RESOURCE_MODE_RULE &&
+           attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH)
+               attr->table.sz_col_log = hws_matcher_rules_to_tbl_depth(attr->rule.num_log);
+
+       matcher->flags |= attr->resizable ? MLX5HWS_MATCHER_FLAGS_RESIZABLE : 0;
+
+       return hws_matcher_check_attr_sz(caps, matcher);
+}
+
+/* Create all HW objects a matcher needs (definers, action STEs, end flow
+ * table, match RTC) and link it into the table's matcher list. On failure
+ * every step already taken is unwound in reverse order.
+ */
+static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
+{
+       int ret;
+
+       /* Select and create the definers for current matcher */
+       ret = hws_matcher_bind_mt(matcher);
+       if (ret)
+               return ret;
+
+       /* Calculate and verify action combination */
+       ret = hws_matcher_bind_at(matcher);
+       if (ret)
+               goto unbind_mt;
+
+       /* Create matcher end flow table anchor */
+       ret = hws_matcher_create_end_ft(matcher);
+       if (ret)
+               goto unbind_at;
+
+       /* Allocate the RTC for the new matcher */
+       ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+       if (ret)
+               goto destroy_end_ft;
+
+       /* Connect the matcher to the matcher list */
+       ret = hws_matcher_connect(matcher);
+       if (ret)
+               goto destroy_rtc;
+
+       return 0;
+
+destroy_rtc:
+       hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+destroy_end_ft:
+       hws_matcher_destroy_end_ft(matcher);
+unbind_at:
+       hws_matcher_unbind_at(matcher);
+unbind_mt:
+       hws_matcher_unbind_mt(matcher);
+       return ret;
+}
+
+/* Full teardown counterpart of hws_matcher_create_and_connect(); also
+ * drops any resize leftovers before disconnecting from the table.
+ */
+static void hws_matcher_destroy_and_disconnect(struct mlx5hws_matcher *matcher)
+{
+       hws_matcher_resize_uninit(matcher);
+       hws_matcher_disconnect(matcher);
+       hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+       hws_matcher_destroy_end_ft(matcher);
+       hws_matcher_unbind_at(matcher);
+       hws_matcher_unbind_mt(matcher);
+}
+
+/* Create the auxiliary collision matcher that backs a hash-inserted,
+ * rule-resource-mode matcher once the rule count crosses the threshold.
+ * It shares the parent's templates and priority but gets its own small
+ * table (~3% of the parent's rows, see MLX5HWS_MATCHER_ASSURED_ROW_RATIO).
+ */
+static int
+hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher)
+{
+       struct mlx5hws_context *ctx = matcher->tbl->ctx;
+       struct mlx5hws_matcher *col_matcher;
+       int ret;
+
+       /* Only hash-inserted rule-mode matchers need a collision table */
+       if (matcher->attr.mode != MLX5HWS_MATCHER_RESOURCE_MODE_RULE ||
+           matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX)
+               return 0;
+
+       if (!hws_matcher_requires_col_tbl(matcher->attr.rule.num_log))
+               return 0;
+
+       col_matcher = kzalloc(sizeof(*matcher), GFP_KERNEL);
+       if (!col_matcher)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&col_matcher->resize_data);
+
+       /* Share the parent's templates; only sizing/flags differ */
+       col_matcher->tbl = matcher->tbl;
+       col_matcher->mt = matcher->mt;
+       col_matcher->at = matcher->at;
+       col_matcher->num_of_at = matcher->num_of_at;
+       col_matcher->num_of_mt = matcher->num_of_mt;
+       col_matcher->attr.priority = matcher->attr.priority;
+       col_matcher->flags = matcher->flags;
+       col_matcher->flags |= MLX5HWS_MATCHER_FLAGS_COLLISION;
+       col_matcher->attr.mode = MLX5HWS_MATCHER_RESOURCE_MODE_HTABLE;
+       col_matcher->attr.optimize_flow_src = matcher->attr.optimize_flow_src;
+       col_matcher->attr.table.sz_row_log = matcher->attr.rule.num_log;
+       col_matcher->attr.table.sz_col_log = MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH;
+       if (col_matcher->attr.table.sz_row_log > MLX5HWS_MATCHER_ASSURED_ROW_RATIO)
+               col_matcher->attr.table.sz_row_log -= MLX5HWS_MATCHER_ASSURED_ROW_RATIO;
+
+       col_matcher->attr.max_num_of_at_attach = matcher->attr.max_num_of_at_attach;
+
+       ret = hws_matcher_process_attr(ctx->caps, col_matcher);
+       if (ret)
+               goto free_col_matcher;
+
+       ret = hws_matcher_create_and_connect(col_matcher);
+       if (ret)
+               goto free_col_matcher;
+
+       matcher->col_matcher = col_matcher;
+
+       return 0;
+
+free_col_matcher:
+       kfree(col_matcher);
+       mlx5hws_err(ctx, "Failed to create assured collision matcher\n");
+       return ret;
+}
+
+/* Destroy the collision matcher, if one was created for this matcher.
+ * Mirrors the creation conditions in hws_matcher_create_col_matcher().
+ */
+static void
+hws_matcher_destroy_col_matcher(struct mlx5hws_matcher *matcher)
+{
+       if (matcher->attr.mode != MLX5HWS_MATCHER_RESOURCE_MODE_RULE ||
+           matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX)
+               return;
+
+       if (matcher->col_matcher) {
+               hws_matcher_destroy_and_disconnect(matcher->col_matcher);
+               kfree(matcher->col_matcher);
+       }
+}
+
+/* Initialize the matcher under the context control lock: create its HW
+ * resources, connect it to the table, and create a collision matcher if
+ * one is required. Returns 0 or a negative errno.
+ */
+static int hws_matcher_init(struct mlx5hws_matcher *matcher)
+{
+       struct mlx5hws_context *ctx = matcher->tbl->ctx;
+       int ret;
+
+       INIT_LIST_HEAD(&matcher->resize_data);
+
+       mutex_lock(&ctx->ctrl_lock);
+
+       /* Allocate matcher resource and connect to the packet pipe */
+       ret = hws_matcher_create_and_connect(matcher);
+       if (ret)
+               goto unlock_err;
+
+       /* Create additional matcher for collision handling */
+       ret = hws_matcher_create_col_matcher(matcher);
+       if (ret)
+               goto destroy_and_disconnect;
+       mutex_unlock(&ctx->ctrl_lock);
+
+       return 0;
+
+destroy_and_disconnect:      /* was misspelled "destory_and_disconnect" */
+       hws_matcher_destroy_and_disconnect(matcher);
+unlock_err:
+       mutex_unlock(&ctx->ctrl_lock);
+       return ret;
+}
+
+/* Destroy the matcher's HW resources (collision matcher first) under the
+ * context control lock. Always returns 0.
+ */
+static int hws_matcher_uninit(struct mlx5hws_matcher *matcher)
+{
+       struct mlx5hws_context *ctx = matcher->tbl->ctx;
+
+       mutex_lock(&ctx->ctrl_lock);
+       hws_matcher_destroy_col_matcher(matcher);
+       hws_matcher_destroy_and_disconnect(matcher);
+       mutex_unlock(&ctx->ctrl_lock);
+
+       return 0;
+}
+
+/* Attach an additional action template to an existing matcher. The new
+ * template must fit within the action STE budget reserved at creation
+ * (attr.max_num_of_at_attach slots, action_ste max_stes). The template is
+ * copied into the matcher's pre-allocated array, so no reallocation or
+ * HW change is needed. Returns 0, -EOPNOTSUPP or -ENOMEM.
+ */
+int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
+                             struct mlx5hws_action_template *at)
+{
+       bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
+       struct mlx5hws_context *ctx = matcher->tbl->ctx;
+       u32 required_stes;
+       int ret;
+
+       if (!matcher->attr.max_num_of_at_attach) {
+               mlx5hws_dbg(ctx, "Num of current at (%d) exceed allowed value\n",
+                           matcher->num_of_at);
+               return -EOPNOTSUPP;
+       }
+
+       ret = hws_matcher_check_and_process_at(matcher, at);
+       if (ret)
+               return ret;
+
+       /* Same STE-count formula as hws_matcher_bind_at() */
+       required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
+       if (matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes < required_stes) {
+               mlx5hws_dbg(ctx, "Required STEs [%d] exceeds initial action template STE [%d]\n",
+                           required_stes,
+                           matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes);
+               return -ENOMEM;
+       }
+
+       matcher->at[matcher->num_of_at] = *at;
+       matcher->num_of_at += 1;
+       matcher->attr.max_num_of_at_attach -= 1;
+
+       /* Keep the collision matcher's template count in sync (it shares
+        * the parent's 'at' array).
+        */
+       if (matcher->col_matcher)
+               matcher->col_matcher->num_of_at = matcher->num_of_at;
+
+       return 0;
+}
+
+/* Copy the caller's match and action templates into arrays owned by the
+ * matcher. The action template array reserves max_num_of_at_attach extra
+ * slots for later mlx5hws_matcher_attach_at() calls.
+ * Returns 0, -EOPNOTSUPP (zero templates) or -ENOMEM.
+ */
+static int
+hws_matcher_set_templates(struct mlx5hws_matcher *matcher,
+                         struct mlx5hws_match_template *mt[],
+                         u8 num_of_mt,
+                         struct mlx5hws_action_template *at[],
+                         u8 num_of_at)
+{
+       struct mlx5hws_context *ctx = matcher->tbl->ctx;
+       int ret = 0;
+       int i;
+
+       if (!num_of_mt || !num_of_at) {
+               mlx5hws_err(ctx, "Number of action/match template cannot be zero\n");
+               return -EOPNOTSUPP;
+       }
+
+       matcher->mt = kcalloc(num_of_mt, sizeof(*matcher->mt), GFP_KERNEL);
+       if (!matcher->mt)
+               return -ENOMEM;
+
+       matcher->at = kcalloc(num_of_at + matcher->attr.max_num_of_at_attach,
+                             sizeof(*matcher->at),
+                             GFP_KERNEL);
+       if (!matcher->at) {
+               mlx5hws_err(ctx, "Failed to allocate action template array\n");
+               ret = -ENOMEM;
+               goto free_mt;
+       }
+
+       /* Templates are copied by value; the caller keeps its own objects */
+       for (i = 0; i < num_of_mt; i++)
+               matcher->mt[i] = *mt[i];
+
+       for (i = 0; i < num_of_at; i++)
+               matcher->at[i] = *at[i];
+
+       matcher->num_of_mt = num_of_mt;
+       matcher->num_of_at = num_of_at;
+
+       return 0;
+
+free_mt:
+       kfree(matcher->mt);
+       return ret;
+}
+
+/* Free the template arrays allocated by hws_matcher_set_templates() */
+static void
+hws_matcher_unset_templates(struct mlx5hws_matcher *matcher)
+{
+       kfree(matcher->at);
+       kfree(matcher->mt);
+}
+
+/* Create a matcher on @tbl from the given match/action templates and
+ * attributes. The attributes and templates are copied; the caller keeps
+ * ownership of its own objects. Returns the new matcher or NULL on any
+ * failure (errors are logged, not returned).
+ */
+struct mlx5hws_matcher *
+mlx5hws_matcher_create(struct mlx5hws_table *tbl,
+                      struct mlx5hws_match_template *mt[],
+                      u8 num_of_mt,
+                      struct mlx5hws_action_template *at[],
+                      u8 num_of_at,
+                      struct mlx5hws_matcher_attr *attr)
+{
+       struct mlx5hws_context *ctx = tbl->ctx;
+       struct mlx5hws_matcher *matcher;
+       int ret;
+
+       matcher = kzalloc(sizeof(*matcher), GFP_KERNEL);
+       if (!matcher)
+               return NULL;
+
+       matcher->tbl = tbl;
+       matcher->attr = *attr;
+
+       /* Validate/normalize attributes before any allocation */
+       ret = hws_matcher_process_attr(tbl->ctx->caps, matcher);
+       if (ret)
+               goto free_matcher;
+
+       ret = hws_matcher_set_templates(matcher, mt, num_of_mt, at, num_of_at);
+       if (ret)
+               goto free_matcher;
+
+       ret = hws_matcher_init(matcher);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to initialise matcher: %d\n", ret);
+               goto unset_templates;
+       }
+
+       return matcher;
+
+unset_templates:
+       hws_matcher_unset_templates(matcher);
+free_matcher:
+       kfree(matcher);
+       return NULL;
+}
+
+/* Destroy a matcher created by mlx5hws_matcher_create(). Always returns 0. */
+int mlx5hws_matcher_destroy(struct mlx5hws_matcher *matcher)
+{
+       hws_matcher_uninit(matcher);
+       hws_matcher_unset_templates(matcher);
+       kfree(matcher);
+       return 0;
+}
+
+/* Create a match template from a caller-provided fte_match_param buffer of
+ * @match_param_sz bytes. The parameter buffer is copied into a full-size
+ * fte_match_param allocation (remainder zeroed by kzalloc).
+ * Returns the template or NULL on failure.
+ */
+struct mlx5hws_match_template *
+mlx5hws_match_template_create(struct mlx5hws_context *ctx,
+                             u32 *match_param,
+                             u32 match_param_sz,
+                             u8 match_criteria_enable)
+{
+       struct mlx5hws_match_template *mt;
+
+       /* Guard the memcpy below: a size larger than the fte_match_param
+        * allocation would overflow the heap buffer.
+        */
+       if (!match_param || match_param_sz > MLX5_ST_SZ_BYTES(fte_match_param))
+               return NULL;
+
+       mt = kzalloc(sizeof(*mt), GFP_KERNEL);
+       if (!mt)
+               return NULL;
+
+       mt->match_param = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+       if (!mt->match_param)
+               goto free_template;
+
+       memcpy(mt->match_param, match_param, match_param_sz);
+       mt->match_criteria_enable = match_criteria_enable;
+
+       return mt;
+
+free_template:
+       kfree(mt);
+       return NULL;
+}
+
+/* Free a match template and its parameter buffer. Always returns 0. */
+int mlx5hws_match_template_destroy(struct mlx5hws_match_template *mt)
+{
+       kfree(mt->match_param);
+       kfree(mt);
+       return 0;
+}
+
+/* Verify that @src_matcher's rules can be moved into @dst_matcher: same
+ * table type and insert mode, both resizable, neither already resizing,
+ * equivalent definers, and enough action STEs in the destination.
+ * Returns 0 when the resize may proceed, -EINVAL otherwise.
+ */
+static int hws_matcher_resize_precheck(struct mlx5hws_matcher *src_matcher,
+                                      struct mlx5hws_matcher *dst_matcher)
+{
+       struct mlx5hws_context *ctx = src_matcher->tbl->ctx;
+       int i;
+
+       if (src_matcher->tbl->type != dst_matcher->tbl->type) {
+               mlx5hws_err(ctx, "Table type mismatch for src/dst matchers\n");
+               return -EINVAL;
+       }
+
+       if (!mlx5hws_matcher_is_resizable(src_matcher) ||
+           !mlx5hws_matcher_is_resizable(dst_matcher)) {
+               mlx5hws_err(ctx, "Src/dst matcher is not resizable\n");
+               return -EINVAL;
+       }
+
+       if (mlx5hws_matcher_is_insert_by_idx(src_matcher) !=
+           mlx5hws_matcher_is_insert_by_idx(dst_matcher)) {
+               mlx5hws_err(ctx, "Src/dst matchers insert mode mismatch\n");
+               return -EINVAL;
+       }
+
+       if (mlx5hws_matcher_is_in_resize(src_matcher) ||
+           mlx5hws_matcher_is_in_resize(dst_matcher)) {
+               mlx5hws_err(ctx, "Src/dst matcher is already in resize\n");
+               return -EINVAL;
+       }
+
+       /* Compare match templates - make sure the definers are equivalent */
+       if (src_matcher->num_of_mt != dst_matcher->num_of_mt) {
+               mlx5hws_err(ctx, "Src/dst matcher match templates mismatch\n");
+               return -EINVAL;
+       }
+
+       /* NOTE(review): src is indexed with MLX5HWS_ACTION_STE_IDX_ANY but
+        * dst with literal 0 - presumably equivalent since both sets share
+        * max_stes (see hws_matcher_bind_at); confirm and make consistent.
+        */
+       if (src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes >
+           dst_matcher->action_ste[0].max_stes) {
+               mlx5hws_err(ctx, "Src/dst matcher max STEs mismatch\n");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < src_matcher->num_of_mt; i++) {
+               if (mlx5hws_definer_compare(src_matcher->mt[i].definer,
+                                           dst_matcher->mt[i].definer)) {
+                       mlx5hws_err(ctx, "Src/dst matcher definers mismatch\n");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+/* Start resizing @src_matcher into @dst_matcher: after prechecks pass,
+ * mark the destination via resize_dst (which puts src in "in resize"
+ * state) and hand over resize bookkeeping. On init failure the marker is
+ * cleared again. Runs under the context control lock.
+ */
+int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher,
+                                     struct mlx5hws_matcher *dst_matcher)
+{
+       int ret = 0;
+
+       mutex_lock(&src_matcher->tbl->ctx->ctrl_lock);
+
+       ret = hws_matcher_resize_precheck(src_matcher, dst_matcher);
+       if (ret)
+               goto out;
+
+       src_matcher->resize_dst = dst_matcher;
+
+       ret = hws_matcher_resize_init(src_matcher);
+       if (ret)
+               src_matcher->resize_dst = NULL;
+
+out:
+       mutex_unlock(&src_matcher->tbl->ctx->ctrl_lock);
+       return ret;
+}
+
+/* Move one rule from a matcher that is mid-resize to its resize target.
+ * The rule must belong to @src_matcher and the matcher must have a resize
+ * target set (see mlx5hws_matcher_resize_set_target).
+ */
+int mlx5hws_matcher_resize_rule_move(struct mlx5hws_matcher *src_matcher,
+                                    struct mlx5hws_rule *rule,
+                                    struct mlx5hws_rule_attr *attr)
+{
+       struct mlx5hws_context *ctx = src_matcher->tbl->ctx;
+
+       if (unlikely(!mlx5hws_matcher_is_in_resize(src_matcher))) {
+               mlx5hws_err(ctx, "Matcher is not resizable or not in resize\n");
+               return -EINVAL;
+       }
+
+       if (unlikely(src_matcher != rule->matcher)) {
+               mlx5hws_err(ctx, "Rule doesn't belong to src matcher\n");
+               return -EINVAL;
+       }
+
+       return mlx5hws_rule_move_hws_add(rule, attr);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
new file mode 100644 (file)
index 0000000..81ff487
--- /dev/null
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_MATCHER_H_
+#define HWS_MATCHER_H_
+
+/* We calculated that concatenating a collision table to the main table with
+ * 3% of the main table rows will be enough resources for high insertion
+ * success probability.
+ *
+ * The calculation: log2(2^x * 3 / 100) = log2(2^x) + log2(3/100) = x - 5.05 ~ 5
+ */
+#define MLX5HWS_MATCHER_ASSURED_ROW_RATIO 5
+/* Threshold to determine if amount of rules require a collision table */
+#define MLX5HWS_MATCHER_ASSURED_RULES_TH 10
+/* Required depth of an assured collision table */
+#define MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH 4
+/* Required depth of the main large table */
+#define MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH 2
+
+/* NOTE(review): presumably DW offsets of the rule tag within the STE -
+ * confirm against the STE layout.
+ */
+enum mlx5hws_matcher_offset {
+       MLX5HWS_MATCHER_OFFSET_TAG_DW1 = 12,
+       MLX5HWS_MATCHER_OFFSET_TAG_DW0 = 13,
+};
+
+/* Internal matcher state flags, stored in mlx5hws_matcher.flags */
+enum mlx5hws_matcher_flags {
+       MLX5HWS_MATCHER_FLAGS_COLLISION = 1 << 2,
+       MLX5HWS_MATCHER_FLAGS_RESIZABLE = 1 << 3,
+};
+
+/* A match template: the caller's fte_match_param mask plus the definer
+ * and field-copy data derived from it at matcher bind time.
+ */
+struct mlx5hws_match_template {
+       struct mlx5hws_definer *definer;
+       struct mlx5hws_definer_fc *fc;
+       u32 *match_param;               /* full-size fte_match_param copy */
+       u8 match_criteria_enable;
+       u16 fc_sz;                      /* number of fc entries, presumably - confirm */
+};
+
+/* Match-side STE resources of a matcher: the STE chunk, its backing pool,
+ * and the RTC id(s) - rtc_1 is used for the FDB mirror side.
+ */
+struct mlx5hws_matcher_match_ste {
+       struct mlx5hws_pool_chunk ste;
+       u32 rtc_0_id;
+       u32 rtc_1_id;
+       struct mlx5hws_pool *pool;
+};
+
+/* One action STE set of a matcher (there are two, see
+ * mlx5hws_matcher.action_ste): STE chunk, jump-to-STE STC, RTC ids,
+ * backing pool, and the per-rule action STE budget.
+ */
+struct mlx5hws_matcher_action_ste {
+       struct mlx5hws_pool_chunk ste;
+       struct mlx5hws_pool_chunk stc;
+       u32 rtc_0_id;
+       u32 rtc_1_id;
+       struct mlx5hws_pool *pool;
+       u8 max_stes;    /* max action STEs any rule of this matcher needs */
+};
+
+/* Per-action-STE-set resources preserved from a resized-out matcher so
+ * its rules keep working until moved; freed in hws_matcher_resize_uninit().
+ */
+struct mlx5hws_matcher_resize_data_node {
+       struct mlx5hws_pool_chunk stc;
+       u32 rtc_0_id;
+       u32 rtc_1_id;
+       struct mlx5hws_pool *pool;
+};
+
+/* Leftovers of one resized-out matcher, kept on the destination matcher's
+ * resize_data list (both action STE sets plus their shared max_stes).
+ */
+struct mlx5hws_matcher_resize_data {
+       struct mlx5hws_matcher_resize_data_node action_ste[2];
+       u8 max_stes;
+       struct list_head list_node;
+};
+
+/* A hardware-steering matcher: owns copies of its match/action templates,
+ * the match and action STE resources, an optional collision matcher, and
+ * resize bookkeeping. Linked into its table via list_node.
+ */
+struct mlx5hws_matcher {
+       struct mlx5hws_table *tbl;
+       struct mlx5hws_matcher_attr attr;
+       struct mlx5hws_match_template *mt;      /* owned array, num_of_mt entries */
+       struct mlx5hws_action_template *at;     /* owned array, with attach slack */
+       u8 num_of_at;
+       u8 num_of_mt;
+       /* enum mlx5hws_matcher_flags */
+       u8 flags;
+       u32 end_ft_id;
+       struct mlx5hws_matcher *col_matcher;    /* collision matcher, or NULL */
+       struct mlx5hws_matcher *resize_dst;     /* non-NULL while in resize */
+       struct mlx5hws_matcher_match_ste match_ste;
+       struct mlx5hws_matcher_action_ste action_ste[2];
+       struct list_head list_node;             /* on the table's matcher list */
+       struct list_head resize_data;           /* mlx5hws_matcher_resize_data */
+};
+
+/* True when the template's definer is a jumbo definer */
+static inline bool
+mlx5hws_matcher_mt_is_jumbo(struct mlx5hws_match_template *mt)
+{
+       return mlx5hws_definer_is_jumbo(mt->definer);
+}
+
+/* True when the matcher was created with the resizable attribute */
+static inline bool mlx5hws_matcher_is_resizable(struct mlx5hws_matcher *matcher)
+{
+       return !!(matcher->flags & MLX5HWS_MATCHER_FLAGS_RESIZABLE);
+}
+
+/* True while a resize target has been set and not yet cleared */
+static inline bool mlx5hws_matcher_is_in_resize(struct mlx5hws_matcher *matcher)
+{
+       return !!matcher->resize_dst;
+}
+
+/* True for matchers whose rules are inserted at explicit indices */
+static inline bool mlx5hws_matcher_is_insert_by_idx(struct mlx5hws_matcher *matcher)
+{
+       return matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX;
+}
+
+#endif /* HWS_MATCHER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c
deleted file mode 100644 (file)
index b27bb41..0000000
+++ /dev/null
@@ -1,2604 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#include "mlx5hws_internal.h"
-
-#define MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET 1
-
-/* Header removal size limited to 128B (64 words) */
-#define MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE 128
-
-/* This is the longest supported action sequence for FDB table:
- * DECAP, POP_VLAN, MODIFY, CTR, ASO, PUSH_VLAN, MODIFY, ENCAP, Term.
- */
-static const u32 action_order_arr[MLX5HWS_TABLE_TYPE_MAX][MLX5HWS_ACTION_TYP_MAX] = {
-       [MLX5HWS_TABLE_TYPE_FDB] = {
-               BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) |
-               BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
-               BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
-               BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
-               BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
-               BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
-               BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
-               BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
-               BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) |
-               BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
-               BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
-               BIT(MLX5HWS_ACTION_TYP_CTR),
-               BIT(MLX5HWS_ACTION_TYP_TAG),
-               BIT(MLX5HWS_ACTION_TYP_ASO_METER),
-               BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
-               BIT(MLX5HWS_ACTION_TYP_TBL) |
-               BIT(MLX5HWS_ACTION_TYP_VPORT) |
-               BIT(MLX5HWS_ACTION_TYP_DROP) |
-               BIT(MLX5HWS_ACTION_TYP_SAMPLER) |
-               BIT(MLX5HWS_ACTION_TYP_RANGE) |
-               BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY),
-               BIT(MLX5HWS_ACTION_TYP_LAST),
-       },
-};
-
-static const char * const mlx5hws_action_type_str[] = {
-       [MLX5HWS_ACTION_TYP_LAST] = "LAST",
-       [MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2] = "TNL_L2_TO_L2",
-       [MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2] = "L2_TO_TNL_L2",
-       [MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2] = "TNL_L3_TO_L2",
-       [MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3] = "L2_TO_TNL_L3",
-       [MLX5HWS_ACTION_TYP_DROP] = "DROP",
-       [MLX5HWS_ACTION_TYP_TBL] = "TBL",
-       [MLX5HWS_ACTION_TYP_CTR] = "CTR",
-       [MLX5HWS_ACTION_TYP_TAG] = "TAG",
-       [MLX5HWS_ACTION_TYP_MODIFY_HDR] = "MODIFY_HDR",
-       [MLX5HWS_ACTION_TYP_VPORT] = "VPORT",
-       [MLX5HWS_ACTION_TYP_MISS] = "DEFAULT_MISS",
-       [MLX5HWS_ACTION_TYP_POP_VLAN] = "POP_VLAN",
-       [MLX5HWS_ACTION_TYP_PUSH_VLAN] = "PUSH_VLAN",
-       [MLX5HWS_ACTION_TYP_ASO_METER] = "ASO_METER",
-       [MLX5HWS_ACTION_TYP_DEST_ARRAY] = "DEST_ARRAY",
-       [MLX5HWS_ACTION_TYP_INSERT_HEADER] = "INSERT_HEADER",
-       [MLX5HWS_ACTION_TYP_REMOVE_HEADER] = "REMOVE_HEADER",
-       [MLX5HWS_ACTION_TYP_SAMPLER] = "SAMPLER",
-       [MLX5HWS_ACTION_TYP_RANGE] = "RANGE",
-};
-
-static_assert(ARRAY_SIZE(mlx5hws_action_type_str) == MLX5HWS_ACTION_TYP_MAX,
-             "Missing mlx5hws_action_type_str");
-
-const char *mlx5hws_action_type_to_str(enum mlx5hws_action_type action_type)
-{
-       return mlx5hws_action_type_str[action_type];
-}
-
-enum mlx5hws_action_type mlx5hws_action_get_type(struct mlx5hws_action *action)
-{
-       return action->type;
-}
-
-static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
-                                        enum mlx5hws_context_shared_stc_type stc_type,
-                                        u8 tbl_type)
-{
-       struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
-       struct mlx5hws_action_shared_stc *shared_stc;
-       int ret;
-
-       mutex_lock(&ctx->ctrl_lock);
-       if (ctx->common_res[tbl_type].shared_stc[stc_type]) {
-               ctx->common_res[tbl_type].shared_stc[stc_type]->refcount++;
-               mutex_unlock(&ctx->ctrl_lock);
-               return 0;
-       }
-
-       shared_stc = kzalloc(sizeof(*shared_stc), GFP_KERNEL);
-       if (!shared_stc) {
-               ret = -ENOMEM;
-               goto unlock_and_out;
-       }
-       switch (stc_type) {
-       case MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3:
-               stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;
-               stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
-               stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
-               stc_attr.remove_header.decap = 0;
-               stc_attr.remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
-               stc_attr.remove_header.end_anchor = MLX5_HEADER_ANCHOR_IPV6_IPV4;
-               break;
-       case MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP:
-               stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
-               stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
-               stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
-               stc_attr.remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START;
-               stc_attr.remove_words.num_of_words = MLX5HWS_ACTION_HDR_LEN_L2_VLAN;
-               break;
-       default:
-               mlx5hws_err(ctx, "No such stc_type: %d\n", stc_type);
-               pr_warn("HWS: Invalid stc_type: %d\n", stc_type);
-               ret = -EINVAL;
-               goto unlock_and_out;
-       }
-
-       ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
-                                             &shared_stc->stc_chunk);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to allocate shared decap l2 STC\n");
-               goto free_shared_stc;
-       }
-
-       ctx->common_res[tbl_type].shared_stc[stc_type] = shared_stc;
-       ctx->common_res[tbl_type].shared_stc[stc_type]->refcount = 1;
-
-       mutex_unlock(&ctx->ctrl_lock);
-
-       return 0;
-
-free_shared_stc:
-       kfree(shared_stc);
-unlock_and_out:
-       mutex_unlock(&ctx->ctrl_lock);
-       return ret;
-}
-
-static int hws_action_get_shared_stc(struct mlx5hws_action *action,
-                                    enum mlx5hws_context_shared_stc_type stc_type)
-{
-       struct mlx5hws_context *ctx = action->ctx;
-       int ret;
-
-       if (stc_type >= MLX5HWS_CONTEXT_SHARED_STC_MAX) {
-               pr_warn("HWS: Invalid shared stc_type: %d\n", stc_type);
-               return -EINVAL;
-       }
-
-       if (unlikely(!(action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB))) {
-               pr_warn("HWS: Invalid action->flags: %d\n", action->flags);
-               return -EINVAL;
-       }
-
-       ret = hws_action_get_shared_stc_nic(ctx, stc_type, MLX5HWS_TABLE_TYPE_FDB);
-       if (ret) {
-               mlx5hws_err(ctx,
-                           "Failed to allocate memory for FDB shared STCs (type: %d)\n",
-                           stc_type);
-               return ret;
-       }
-
-       return 0;
-}
-
-static void hws_action_put_shared_stc(struct mlx5hws_action *action,
-                                     enum mlx5hws_context_shared_stc_type stc_type)
-{
-       enum mlx5hws_table_type tbl_type = MLX5HWS_TABLE_TYPE_FDB;
-       struct mlx5hws_action_shared_stc *shared_stc;
-       struct mlx5hws_context *ctx = action->ctx;
-
-       if (stc_type >= MLX5HWS_CONTEXT_SHARED_STC_MAX) {
-               pr_warn("HWS: Invalid shared stc_type: %d\n", stc_type);
-               return;
-       }
-
-       mutex_lock(&ctx->ctrl_lock);
-       if (--ctx->common_res[tbl_type].shared_stc[stc_type]->refcount) {
-               mutex_unlock(&ctx->ctrl_lock);
-               return;
-       }
-
-       shared_stc = ctx->common_res[tbl_type].shared_stc[stc_type];
-
-       mlx5hws_action_free_single_stc(ctx, tbl_type, &shared_stc->stc_chunk);
-       kfree(shared_stc);
-       ctx->common_res[tbl_type].shared_stc[stc_type] = NULL;
-       mutex_unlock(&ctx->ctrl_lock);
-}
-
-static void hws_action_print_combo(struct mlx5hws_context *ctx,
-                                  enum mlx5hws_action_type *user_actions)
-{
-       mlx5hws_err(ctx, "Invalid action_type sequence");
-       while (*user_actions != MLX5HWS_ACTION_TYP_LAST) {
-               mlx5hws_err(ctx, " %s", mlx5hws_action_type_to_str(*user_actions));
-               user_actions++;
-       }
-       mlx5hws_err(ctx, "\n");
-}
-
-bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx,
-                               enum mlx5hws_action_type *user_actions,
-                               enum mlx5hws_table_type table_type)
-{
-       const u32 *order_arr = action_order_arr[table_type];
-       u8 order_idx = 0;
-       u8 user_idx = 0;
-       bool valid_combo;
-
-       if (table_type >= MLX5HWS_TABLE_TYPE_MAX) {
-               mlx5hws_err(ctx, "Invalid table_type %d", table_type);
-               return false;
-       }
-
-       while (order_arr[order_idx] != BIT(MLX5HWS_ACTION_TYP_LAST)) {
-               /* User action order validated move to next user action */
-               if (BIT(user_actions[user_idx]) & order_arr[order_idx])
-                       user_idx++;
-
-               /* Iterate to the next supported action in the order */
-               order_idx++;
-       }
-
-       /* Combination is valid if all user action were processed */
-       valid_combo = user_actions[user_idx] == MLX5HWS_ACTION_TYP_LAST;
-       if (!valid_combo)
-               hws_action_print_combo(ctx, user_actions);
-
-       return valid_combo;
-}
-
-static bool
-hws_action_fixup_stc_attr(struct mlx5hws_context *ctx,
-                         struct mlx5hws_cmd_stc_modify_attr *stc_attr,
-                         struct mlx5hws_cmd_stc_modify_attr *fixup_stc_attr,
-                         enum mlx5hws_table_type table_type,
-                         bool is_mirror)
-{
-       bool use_fixup = false;
-       u32 fw_tbl_type;
-       u32 base_id;
-
-       fw_tbl_type = mlx5hws_table_get_res_fw_ft_type(table_type, is_mirror);
-
-       switch (stc_attr->action_type) {
-       case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
-               if (is_mirror && stc_attr->ste_table.ignore_tx) {
-                       fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
-                       fixup_stc_attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
-                       fixup_stc_attr->stc_offset = stc_attr->stc_offset;
-                       use_fixup = true;
-                       break;
-               }
-               if (!is_mirror)
-                       base_id = mlx5hws_pool_chunk_get_base_id(stc_attr->ste_table.ste_pool,
-                                                                &stc_attr->ste_table.ste);
-               else
-                       base_id =
-                               mlx5hws_pool_chunk_get_base_mirror_id(stc_attr->ste_table.ste_pool,
-                                                                     &stc_attr->ste_table.ste);
-
-               *fixup_stc_attr = *stc_attr;
-               fixup_stc_attr->ste_table.ste_obj_id = base_id;
-               use_fixup = true;
-               break;
-
-       case MLX5_IFC_STC_ACTION_TYPE_TAG:
-               if (fw_tbl_type == FS_FT_FDB_TX) {
-                       fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_NOP;
-                       fixup_stc_attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
-                       fixup_stc_attr->stc_offset = stc_attr->stc_offset;
-                       use_fixup = true;
-               }
-               break;
-
-       case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
-               if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) {
-                       fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT;
-                       fixup_stc_attr->action_offset = stc_attr->action_offset;
-                       fixup_stc_attr->stc_offset = stc_attr->stc_offset;
-                       fixup_stc_attr->vport.esw_owner_vhca_id = ctx->caps->vhca_id;
-                       fixup_stc_attr->vport.vport_num = ctx->caps->eswitch_manager_vport_number;
-                       fixup_stc_attr->vport.eswitch_owner_vhca_id_valid =
-                               ctx->caps->merged_eswitch;
-                       use_fixup = true;
-               }
-               break;
-
-       case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
-               if (stc_attr->vport.vport_num != MLX5_VPORT_UPLINK)
-                       break;
-
-               if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) {
-                       /* The FW doesn't allow to go to wire in the TX/RX by JUMP_TO_VPORT */
-                       fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK;
-                       fixup_stc_attr->action_offset = stc_attr->action_offset;
-                       fixup_stc_attr->stc_offset = stc_attr->stc_offset;
-                       fixup_stc_attr->vport.vport_num = 0;
-                       fixup_stc_attr->vport.esw_owner_vhca_id = stc_attr->vport.esw_owner_vhca_id;
-                       fixup_stc_attr->vport.eswitch_owner_vhca_id_valid =
-                               stc_attr->vport.eswitch_owner_vhca_id_valid;
-               }
-               use_fixup = true;
-               break;
-
-       default:
-               break;
-       }
-
-       return use_fixup;
-}
-
-int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx,
-                                   struct mlx5hws_cmd_stc_modify_attr *stc_attr,
-                                   u32 table_type,
-                                   struct mlx5hws_pool_chunk *stc)
-__must_hold(&ctx->ctrl_lock)
-{
-       struct mlx5hws_cmd_stc_modify_attr cleanup_stc_attr = {0};
-       struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
-       struct mlx5hws_cmd_stc_modify_attr fixup_stc_attr = {0};
-       bool use_fixup;
-       u32 obj_0_id;
-       int ret;
-
-       ret = mlx5hws_pool_chunk_alloc(stc_pool, stc);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to allocate single action STC\n");
-               return ret;
-       }
-
-       stc_attr->stc_offset = stc->offset;
-
-       /* Dynamic reparse not supported, overwrite and use default */
-       if (!mlx5hws_context_cap_dynamic_reparse(ctx))
-               stc_attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
-
-       obj_0_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
-
-       /* According to table/action limitation change the stc_attr */
-       use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr, &fixup_stc_attr, table_type, false);
-       ret = mlx5hws_cmd_stc_modify(ctx->mdev, obj_0_id,
-                                    use_fixup ? &fixup_stc_attr : stc_attr);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to modify STC action_type %d tbl_type %d\n",
-                           stc_attr->action_type, table_type);
-               goto free_chunk;
-       }
-
-       /* Modify the FDB peer */
-       if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
-               u32 obj_1_id;
-
-               obj_1_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
-
-               use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr,
-                                                     &fixup_stc_attr,
-                                                     table_type, true);
-               ret = mlx5hws_cmd_stc_modify(ctx->mdev, obj_1_id,
-                                            use_fixup ? &fixup_stc_attr : stc_attr);
-               if (ret) {
-                       mlx5hws_err(ctx,
-                                   "Failed to modify peer STC action_type %d tbl_type %d\n",
-                                   stc_attr->action_type, table_type);
-                       goto clean_obj_0;
-               }
-       }
-
-       return 0;
-
-clean_obj_0:
-       cleanup_stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
-       cleanup_stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
-       cleanup_stc_attr.stc_offset = stc->offset;
-       mlx5hws_cmd_stc_modify(ctx->mdev, obj_0_id, &cleanup_stc_attr);
-free_chunk:
-       mlx5hws_pool_chunk_free(stc_pool, stc);
-       return ret;
-}
-
-void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx,
-                                   u32 table_type,
-                                   struct mlx5hws_pool_chunk *stc)
-__must_hold(&ctx->ctrl_lock)
-{
-       struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
-       struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
-       u32 obj_id;
-
-       /* Modify the STC not to point to an object */
-       stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
-       stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
-       stc_attr.stc_offset = stc->offset;
-       obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
-       mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
-
-       if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
-               obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
-               mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
-       }
-
-       mlx5hws_pool_chunk_free(stc_pool, stc);
-}
-
-static u32 hws_action_get_mh_stc_type(struct mlx5hws_context *ctx,
-                                     __be64 pattern)
-{
-       u8 action_type = MLX5_GET(set_action_in, &pattern, action_type);
-
-       switch (action_type) {
-       case MLX5_MODIFICATION_TYPE_SET:
-               return MLX5_IFC_STC_ACTION_TYPE_SET;
-       case MLX5_MODIFICATION_TYPE_ADD:
-               return MLX5_IFC_STC_ACTION_TYPE_ADD;
-       case MLX5_MODIFICATION_TYPE_COPY:
-               return MLX5_IFC_STC_ACTION_TYPE_COPY;
-       case MLX5_MODIFICATION_TYPE_ADD_FIELD:
-               return MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD;
-       default:
-               mlx5hws_err(ctx, "Unsupported action type: 0x%x\n", action_type);
-               return MLX5_IFC_STC_ACTION_TYPE_NOP;
-       }
-}
-
-static void hws_action_fill_stc_attr(struct mlx5hws_action *action,
-                                    u32 obj_id,
-                                    struct mlx5hws_cmd_stc_modify_attr *attr)
-{
-       attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
-
-       switch (action->type) {
-       case MLX5HWS_ACTION_TYP_TAG:
-               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_TAG;
-               attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
-               break;
-       case MLX5HWS_ACTION_TYP_DROP:
-               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
-               attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
-               break;
-       case MLX5HWS_ACTION_TYP_MISS:
-               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW;
-               attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
-               break;
-       case MLX5HWS_ACTION_TYP_CTR:
-               attr->id = obj_id;
-               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_COUNTER;
-               attr->action_offset = MLX5HWS_ACTION_OFFSET_DW0;
-               break;
-       case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
-       case MLX5HWS_ACTION_TYP_MODIFY_HDR:
-               attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
-               attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
-               if (action->modify_header.require_reparse)
-                       attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
-
-               if (action->modify_header.num_of_actions == 1) {
-                       attr->modify_action.data = action->modify_header.single_action;
-                       attr->action_type = hws_action_get_mh_stc_type(action->ctx,
-                                                                      attr->modify_action.data);
-
-                       if (attr->action_type == MLX5_IFC_STC_ACTION_TYPE_ADD ||
-                           attr->action_type == MLX5_IFC_STC_ACTION_TYPE_SET)
-                               MLX5_SET(set_action_in, &attr->modify_action.data, data, 0);
-               } else {
-                       attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST;
-                       attr->modify_header.arg_id = action->modify_header.arg_id;
-                       attr->modify_header.pattern_id = action->modify_header.pat_id;
-               }
-               break;
-       case MLX5HWS_ACTION_TYP_TBL:
-       case MLX5HWS_ACTION_TYP_DEST_ARRAY:
-               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
-               attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
-               attr->dest_table_id = obj_id;
-               break;
-       case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
-               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;
-               attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
-               attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
-               attr->remove_header.decap = 1;
-               attr->remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
-               attr->remove_header.end_anchor = MLX5_HEADER_ANCHOR_INNER_MAC;
-               break;
-       case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
-       case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
-       case MLX5HWS_ACTION_TYP_INSERT_HEADER:
-               attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
-               if (!action->reformat.require_reparse)
-                       attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
-
-               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT;
-               attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
-               attr->insert_header.encap = action->reformat.encap;
-               attr->insert_header.insert_anchor = action->reformat.anchor;
-               attr->insert_header.arg_id = action->reformat.arg_id;
-               attr->insert_header.header_size = action->reformat.header_size;
-               attr->insert_header.insert_offset = action->reformat.offset;
-               break;
-       case MLX5HWS_ACTION_TYP_ASO_METER:
-               attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
-               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ASO;
-               attr->aso.aso_type = ASO_OPC_MOD_POLICER;
-               attr->aso.devx_obj_id = obj_id;
-               attr->aso.return_reg_id = action->aso.return_reg_id;
-               break;
-       case MLX5HWS_ACTION_TYP_VPORT:
-               attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
-               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT;
-               attr->vport.vport_num = action->vport.vport_num;
-               attr->vport.esw_owner_vhca_id = action->vport.esw_owner_vhca_id;
-               attr->vport.eswitch_owner_vhca_id_valid = action->vport.esw_owner_vhca_id_valid;
-               break;
-       case MLX5HWS_ACTION_TYP_POP_VLAN:
-               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
-               attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
-               attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
-               attr->remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START;
-               attr->remove_words.num_of_words = MLX5HWS_ACTION_HDR_LEN_L2_VLAN / 2;
-               break;
-       case MLX5HWS_ACTION_TYP_PUSH_VLAN:
-               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT;
-               attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
-               attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
-               attr->insert_header.encap = 0;
-               attr->insert_header.is_inline = 1;
-               attr->insert_header.insert_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
-               attr->insert_header.insert_offset = MLX5HWS_ACTION_HDR_LEN_L2_MACS;
-               attr->insert_header.header_size = MLX5HWS_ACTION_HDR_LEN_L2_VLAN;
-               break;
-       case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
-               attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
-               attr->remove_header.decap = 0; /* the mode we support decap is 0 */
-               attr->remove_words.start_anchor = action->remove_header.anchor;
-               /* the size is in already in words */
-               attr->remove_words.num_of_words = action->remove_header.size;
-               attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
-               attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
-               break;
-       default:
-               mlx5hws_err(action->ctx, "Invalid action type %d\n", action->type);
-       }
-}
-
-static int
-hws_action_create_stcs(struct mlx5hws_action *action, u32 obj_id)
-{
-       struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
-       struct mlx5hws_context *ctx = action->ctx;
-       int ret;
-
-       hws_action_fill_stc_attr(action, obj_id, &stc_attr);
-
-       /* Block unsupported parallel obj modify over the same base */
-       mutex_lock(&ctx->ctrl_lock);
-
-       /* Allocate STC for FDB */
-       if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB) {
-               ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr,
-                                                     MLX5HWS_TABLE_TYPE_FDB,
-                                                     &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
-               if (ret)
-                       goto out_err;
-       }
-
-       mutex_unlock(&ctx->ctrl_lock);
-
-       return 0;
-
-out_err:
-       mutex_unlock(&ctx->ctrl_lock);
-       return ret;
-}
-
-static void
-hws_action_destroy_stcs(struct mlx5hws_action *action)
-{
-       struct mlx5hws_context *ctx = action->ctx;
-
-       /* Block unsupported parallel obj modify over the same base */
-       mutex_lock(&ctx->ctrl_lock);
-
-       if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB)
-               mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB,
-                                              &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
-
-       mutex_unlock(&ctx->ctrl_lock);
-}
-
-static bool hws_action_is_flag_hws_fdb(u32 flags)
-{
-       return flags & MLX5HWS_ACTION_FLAG_HWS_FDB;
-}
-
-static bool
-hws_action_validate_hws_action(struct mlx5hws_context *ctx, u32 flags)
-{
-       if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) {
-               mlx5hws_err(ctx, "Cannot create HWS action since HWS is not supported\n");
-               return false;
-       }
-
-       if ((flags & MLX5HWS_ACTION_FLAG_HWS_FDB) && !ctx->caps->eswitch_manager) {
-               mlx5hws_err(ctx, "Cannot create HWS action for FDB for non-eswitch-manager\n");
-               return false;
-       }
-
-       return true;
-}
-
-static struct mlx5hws_action *
-hws_action_create_generic_bulk(struct mlx5hws_context *ctx,
-                              u32 flags,
-                              enum mlx5hws_action_type action_type,
-                              u8 bulk_sz)
-{
-       struct mlx5hws_action *action;
-       int i;
-
-       if (!hws_action_is_flag_hws_fdb(flags)) {
-               mlx5hws_err(ctx,
-                           "Action (type: %d) flags must specify only HWS FDB\n", action_type);
-               return NULL;
-       }
-
-       if (!hws_action_validate_hws_action(ctx, flags))
-               return NULL;
-
-       action = kcalloc(bulk_sz, sizeof(*action), GFP_KERNEL);
-       if (!action)
-               return NULL;
-
-       for (i = 0; i < bulk_sz; i++) {
-               action[i].ctx = ctx;
-               action[i].flags = flags;
-               action[i].type = action_type;
-       }
-
-       return action;
-}
-
-static struct mlx5hws_action *
-hws_action_create_generic(struct mlx5hws_context *ctx,
-                         u32 flags,
-                         enum mlx5hws_action_type action_type)
-{
-       return hws_action_create_generic_bulk(ctx, flags, action_type, 1);
-}
-
-struct mlx5hws_action *
-mlx5hws_action_create_dest_table_num(struct mlx5hws_context *ctx,
-                                    u32 table_id,
-                                    u32 flags)
-{
-       struct mlx5hws_action *action;
-       int ret;
-
-       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_TBL);
-       if (!action)
-               return NULL;
-
-       ret = hws_action_create_stcs(action, table_id);
-       if (ret)
-               goto free_action;
-
-       action->dest_obj.obj_id = table_id;
-
-       return action;
-
-free_action:
-       kfree(action);
-       return NULL;
-}
-
-struct mlx5hws_action *
-mlx5hws_action_create_dest_table(struct mlx5hws_context *ctx,
-                                struct mlx5hws_table *tbl,
-                                u32 flags)
-{
-       return mlx5hws_action_create_dest_table_num(ctx, tbl->ft_id, flags);
-}
-
-struct mlx5hws_action *
-mlx5hws_action_create_dest_drop(struct mlx5hws_context *ctx, u32 flags)
-{
-       struct mlx5hws_action *action;
-       int ret;
-
-       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_DROP);
-       if (!action)
-               return NULL;
-
-       ret = hws_action_create_stcs(action, 0);
-       if (ret)
-               goto free_action;
-
-       return action;
-
-free_action:
-       kfree(action);
-       return NULL;
-}
-
-struct mlx5hws_action *
-mlx5hws_action_create_default_miss(struct mlx5hws_context *ctx, u32 flags)
-{
-       struct mlx5hws_action *action;
-       int ret;
-
-       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_MISS);
-       if (!action)
-               return NULL;
-
-       ret = hws_action_create_stcs(action, 0);
-       if (ret)
-               goto free_action;
-
-       return action;
-
-free_action:
-       kfree(action);
-       return NULL;
-}
-
-struct mlx5hws_action *
-mlx5hws_action_create_tag(struct mlx5hws_context *ctx, u32 flags)
-{
-       struct mlx5hws_action *action;
-       int ret;
-
-       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_TAG);
-       if (!action)
-               return NULL;
-
-       ret = hws_action_create_stcs(action, 0);
-       if (ret)
-               goto free_action;
-
-       return action;
-
-free_action:
-       kfree(action);
-       return NULL;
-}
-
-static struct mlx5hws_action *
-hws_action_create_aso(struct mlx5hws_context *ctx,
-                     enum mlx5hws_action_type action_type,
-                     u32 obj_id,
-                     u8 return_reg_id,
-                     u32 flags)
-{
-       struct mlx5hws_action *action;
-       int ret;
-
-       action = hws_action_create_generic(ctx, flags, action_type);
-       if (!action)
-               return NULL;
-
-       action->aso.obj_id = obj_id;
-       action->aso.return_reg_id = return_reg_id;
-
-       ret = hws_action_create_stcs(action, obj_id);
-       if (ret)
-               goto free_action;
-
-       return action;
-
-free_action:
-       kfree(action);
-       return NULL;
-}
-
-struct mlx5hws_action *
-mlx5hws_action_create_aso_meter(struct mlx5hws_context *ctx,
-                               u32 obj_id,
-                               u8 return_reg_id,
-                               u32 flags)
-{
-       return hws_action_create_aso(ctx, MLX5HWS_ACTION_TYP_ASO_METER,
-                                    obj_id, return_reg_id, flags);
-}
-
-struct mlx5hws_action *
-mlx5hws_action_create_counter(struct mlx5hws_context *ctx,
-                             u32 obj_id,
-                             u32 flags)
-{
-       struct mlx5hws_action *action;
-       int ret;
-
-       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_CTR);
-       if (!action)
-               return NULL;
-
-       ret = hws_action_create_stcs(action, obj_id);
-       if (ret)
-               goto free_action;
-
-       return action;
-
-free_action:
-       kfree(action);
-       return NULL;
-}
-
-struct mlx5hws_action *
-mlx5hws_action_create_dest_vport(struct mlx5hws_context *ctx,
-                                u16 vport_num,
-                                bool vhca_id_valid,
-                                u16 vhca_id,
-                                u32 flags)
-{
-       struct mlx5hws_action *action;
-       int ret;
-
-       if (!(flags & MLX5HWS_ACTION_FLAG_HWS_FDB)) {
-               mlx5hws_err(ctx, "Vport action is supported for FDB only\n");
-               return NULL;
-       }
-
-       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_VPORT);
-       if (!action)
-               return NULL;
-
-       if (!ctx->caps->merged_eswitch && vhca_id_valid && vhca_id != ctx->caps->vhca_id) {
-               mlx5hws_err(ctx, "Non merged eswitch cannot send to other vhca\n");
-               goto free_action;
-       }
-
-       action->vport.vport_num = vport_num;
-       action->vport.esw_owner_vhca_id_valid = vhca_id_valid;
-
-       if (vhca_id_valid)
-               action->vport.esw_owner_vhca_id = vhca_id;
-
-       ret = hws_action_create_stcs(action, 0);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed creating stc for vport %d\n", vport_num);
-               goto free_action;
-       }
-
-       return action;
-
-free_action:
-       kfree(action);
-       return NULL;
-}
-
-struct mlx5hws_action *
-mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags)
-{
-       struct mlx5hws_action *action;
-       int ret;
-
-       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_PUSH_VLAN);
-       if (!action)
-               return NULL;
-
-       ret = hws_action_create_stcs(action, 0);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed creating stc for push vlan\n");
-               goto free_action;
-       }
-
-       return action;
-
-free_action:
-       kfree(action);
-       return NULL;
-}
-
-struct mlx5hws_action *
-mlx5hws_action_create_pop_vlan(struct mlx5hws_context *ctx, u32 flags)
-{
-       struct mlx5hws_action *action;
-       int ret;
-
-       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_POP_VLAN);
-       if (!action)
-               return NULL;
-
-       ret = hws_action_get_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to create remove stc for reformat\n");
-               goto free_action;
-       }
-
-       ret = hws_action_create_stcs(action, 0);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed creating stc for pop vlan\n");
-               goto free_shared;
-       }
-
-       return action;
-
-free_shared:
-       hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
-free_action:
-       kfree(action);
-       return NULL;
-}
-
-static int
-hws_action_handle_insert_with_ptr(struct mlx5hws_action *action,
-                                 u8 num_of_hdrs,
-                                 struct mlx5hws_action_reformat_header *hdrs,
-                                 u32 log_bulk_sz)
-{
-       size_t max_sz = 0;
-       u32 arg_id;
-       int ret, i;
-
-       for (i = 0; i < num_of_hdrs; i++) {
-               if (hdrs[i].sz % W_SIZE != 0) {
-                       mlx5hws_err(action->ctx,
-                                   "Header data size should be in WORD granularity\n");
-                       return -EINVAL;
-               }
-               max_sz = max(hdrs[i].sz, max_sz);
-       }
-
-       /* Allocate single shared arg object for all headers */
-       ret = mlx5hws_arg_create(action->ctx,
-                                hdrs->data,
-                                max_sz,
-                                log_bulk_sz,
-                                action->flags & MLX5HWS_ACTION_FLAG_SHARED,
-                                &arg_id);
-       if (ret)
-               return ret;
-
-       for (i = 0; i < num_of_hdrs; i++) {
-               action[i].reformat.arg_id = arg_id;
-               action[i].reformat.header_size = hdrs[i].sz;
-               action[i].reformat.num_of_hdrs = num_of_hdrs;
-               action[i].reformat.max_hdr_sz = max_sz;
-               action[i].reformat.require_reparse = true;
-
-               if (action[i].type == MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2 ||
-                   action[i].type == MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3) {
-                       action[i].reformat.anchor = MLX5_HEADER_ANCHOR_PACKET_START;
-                       action[i].reformat.offset = 0;
-                       action[i].reformat.encap = 1;
-               }
-
-               ret = hws_action_create_stcs(&action[i], 0);
-               if (ret) {
-                       mlx5hws_err(action->ctx, "Failed to create stc for reformat\n");
-                       goto free_stc;
-               }
-       }
-
-       return 0;
-
-free_stc:
-       while (i--)
-               hws_action_destroy_stcs(&action[i]);
-
-       mlx5hws_arg_destroy(action->ctx, arg_id);
-       return ret;
-}
-
-static int
-hws_action_handle_l2_to_tunnel_l3(struct mlx5hws_action *action,
-                                 u8 num_of_hdrs,
-                                 struct mlx5hws_action_reformat_header *hdrs,
-                                 u32 log_bulk_sz)
-{
-       int ret;
-
-       /* The action is remove-l2-header + insert-l3-header */
-       ret = hws_action_get_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
-       if (ret) {
-               mlx5hws_err(action->ctx, "Failed to create remove stc for reformat\n");
-               return ret;
-       }
-
-       /* Reuse the insert with pointer for the L2L3 header */
-       ret = hws_action_handle_insert_with_ptr(action,
-                                               num_of_hdrs,
-                                               hdrs,
-                                               log_bulk_sz);
-       if (ret)
-               goto put_shared_stc;
-
-       return 0;
-
-put_shared_stc:
-       hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
-       return ret;
-}
-
-static void hws_action_prepare_decap_l3_actions(size_t data_sz,
-                                               u8 *mh_data,
-                                               int *num_of_actions)
-{
-       int actions;
-       u32 i;
-
-       /* Remove L2L3 outer headers */
-       MLX5_SET(stc_ste_param_remove, mh_data, action_type,
-                MLX5_MODIFICATION_TYPE_REMOVE);
-       MLX5_SET(stc_ste_param_remove, mh_data, decap, 0x1);
-       MLX5_SET(stc_ste_param_remove, mh_data, remove_start_anchor,
-                MLX5_HEADER_ANCHOR_PACKET_START);
-       MLX5_SET(stc_ste_param_remove, mh_data, remove_end_anchor,
-                MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4);
-       mh_data += MLX5HWS_ACTION_DOUBLE_SIZE; /* Assume every action is 2 dw */
-       actions = 1;
-
-       /* Add the new header using inline action 4Byte at a time, the header
-        * is added in reversed order to the beginning of the packet to avoid
-        * incorrect parsing by the HW. Since header is 14B or 18B an extra
-        * two bytes are padded and later removed.
-        */
-       for (i = 0; i < data_sz / MLX5HWS_ACTION_INLINE_DATA_SIZE + 1; i++) {
-               MLX5_SET(stc_ste_param_insert, mh_data, action_type,
-                        MLX5_MODIFICATION_TYPE_INSERT);
-               MLX5_SET(stc_ste_param_insert, mh_data, inline_data, 0x1);
-               MLX5_SET(stc_ste_param_insert, mh_data, insert_anchor,
-                        MLX5_HEADER_ANCHOR_PACKET_START);
-               MLX5_SET(stc_ste_param_insert, mh_data, insert_size, 2);
-               mh_data += MLX5HWS_ACTION_DOUBLE_SIZE;
-               actions++;
-       }
-
-       /* Remove first 2 extra bytes */
-       MLX5_SET(stc_ste_param_remove_words, mh_data, action_type,
-                MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
-       MLX5_SET(stc_ste_param_remove_words, mh_data, remove_start_anchor,
-                MLX5_HEADER_ANCHOR_PACKET_START);
-       /* The hardware expects here size in words (2 bytes) */
-       MLX5_SET(stc_ste_param_remove_words, mh_data, remove_size, 1);
-       actions++;
-
-       *num_of_actions = actions;
-}
-
-static int
-hws_action_handle_tunnel_l3_to_l2(struct mlx5hws_action *action,
-                                 u8 num_of_hdrs,
-                                 struct mlx5hws_action_reformat_header *hdrs,
-                                 u32 log_bulk_sz)
-{
-       u8 mh_data[MLX5HWS_ACTION_REFORMAT_DATA_SIZE] = {0};
-       struct mlx5hws_context *ctx = action->ctx;
-       u32 arg_id, pat_id;
-       int num_of_actions;
-       int mh_data_size;
-       int ret, i;
-
-       for (i = 0; i < num_of_hdrs; i++) {
-               if (hdrs[i].sz != MLX5HWS_ACTION_HDR_LEN_L2 &&
-                   hdrs[i].sz != MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN) {
-                       mlx5hws_err(ctx, "Data size is not supported for decap-l3\n");
-                       return -EINVAL;
-               }
-       }
-
-       /* Create a full modify header action list in case shared */
-       hws_action_prepare_decap_l3_actions(hdrs->sz, mh_data, &num_of_actions);
-       if (action->flags & MLX5HWS_ACTION_FLAG_SHARED)
-               mlx5hws_action_prepare_decap_l3_data(hdrs->data, mh_data, num_of_actions);
-
-       /* All DecapL3 cases require the same max arg size */
-       ret = mlx5hws_arg_create_modify_header_arg(ctx,
-                                                  (__be64 *)mh_data,
-                                                  num_of_actions,
-                                                  log_bulk_sz,
-                                                  action->flags & MLX5HWS_ACTION_FLAG_SHARED,
-                                                  &arg_id);
-       if (ret)
-               return ret;
-
-       for (i = 0; i < num_of_hdrs; i++) {
-               memset(mh_data, 0, MLX5HWS_ACTION_REFORMAT_DATA_SIZE);
-               hws_action_prepare_decap_l3_actions(hdrs[i].sz, mh_data, &num_of_actions);
-               mh_data_size = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE;
-
-               ret = mlx5hws_pat_get_pattern(ctx, (__be64 *)mh_data, mh_data_size, &pat_id);
-               if (ret) {
-                       mlx5hws_err(ctx, "Failed to allocate pattern for DecapL3\n");
-                       goto free_stc_and_pat;
-               }
-
-               action[i].modify_header.max_num_of_actions = num_of_actions;
-               action[i].modify_header.num_of_actions = num_of_actions;
-               action[i].modify_header.num_of_patterns = num_of_hdrs;
-               action[i].modify_header.arg_id = arg_id;
-               action[i].modify_header.pat_id = pat_id;
-               action[i].modify_header.require_reparse =
-                       mlx5hws_pat_require_reparse((__be64 *)mh_data, num_of_actions);
-
-               ret = hws_action_create_stcs(&action[i], 0);
-               if (ret) {
-                       mlx5hws_pat_put_pattern(ctx, pat_id);
-                       goto free_stc_and_pat;
-               }
-       }
-
-       return 0;
-
-free_stc_and_pat:
-       while (i--) {
-               hws_action_destroy_stcs(&action[i]);
-               mlx5hws_pat_put_pattern(ctx, action[i].modify_header.pat_id);
-       }
-
-       mlx5hws_arg_destroy(action->ctx, arg_id);
-       return ret;
-}
-
-static int
-hws_action_create_reformat_hws(struct mlx5hws_action *action,
-                              u8 num_of_hdrs,
-                              struct mlx5hws_action_reformat_header *hdrs,
-                              u32 bulk_size)
-{
-       int ret;
-
-       switch (action->type) {
-       case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
-               ret = hws_action_create_stcs(action, 0);
-               break;
-       case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
-               ret = hws_action_handle_insert_with_ptr(action, num_of_hdrs, hdrs, bulk_size);
-               break;
-       case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
-               ret = hws_action_handle_l2_to_tunnel_l3(action, num_of_hdrs, hdrs, bulk_size);
-               break;
-       case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
-               ret = hws_action_handle_tunnel_l3_to_l2(action, num_of_hdrs, hdrs, bulk_size);
-               break;
-       default:
-               mlx5hws_err(action->ctx, "Invalid HWS reformat action type\n");
-               return -EINVAL;
-       }
-
-       return ret;
-}
-
-struct mlx5hws_action *
-mlx5hws_action_create_reformat(struct mlx5hws_context *ctx,
-                              enum mlx5hws_action_type reformat_type,
-                              u8 num_of_hdrs,
-                              struct mlx5hws_action_reformat_header *hdrs,
-                              u32 log_bulk_size,
-                              u32 flags)
-{
-       struct mlx5hws_action *action;
-       int ret;
-
-       if (!num_of_hdrs) {
-               mlx5hws_err(ctx, "Reformat num_of_hdrs cannot be zero\n");
-               return NULL;
-       }
-
-       action = hws_action_create_generic_bulk(ctx, flags, reformat_type, num_of_hdrs);
-       if (!action)
-               return NULL;
-
-       if ((flags & MLX5HWS_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_hdrs > 1)) {
-               mlx5hws_err(ctx, "Reformat flags don't fit HWS (flags: 0x%x)\n", flags);
-               goto free_action;
-       }
-
-       ret = hws_action_create_reformat_hws(action, num_of_hdrs, hdrs, log_bulk_size);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to create HWS reformat action\n");
-               goto free_action;
-       }
-
-       return action;
-
-free_action:
-       kfree(action);
-       return NULL;
-}
-
-static int
-hws_action_create_modify_header_hws(struct mlx5hws_action *action,
-                                   u8 num_of_patterns,
-                                   struct mlx5hws_action_mh_pattern *pattern,
-                                   u32 log_bulk_size)
-{
-       struct mlx5hws_context *ctx = action->ctx;
-       u16 num_actions, max_mh_actions = 0;
-       int i, ret, size_in_bytes;
-       u32 pat_id, arg_id = 0;
-       __be64 *new_pattern;
-       size_t pat_max_sz;
-
-       pat_max_sz = MLX5HWS_ARG_CHUNK_SIZE_MAX * MLX5HWS_ARG_DATA_SIZE;
-       size_in_bytes = pat_max_sz * sizeof(__be64);
-       new_pattern = kcalloc(num_of_patterns, size_in_bytes, GFP_KERNEL);
-       if (!new_pattern)
-               return -ENOMEM;
-
-       /* Calculate maximum number of mh actions for shared arg allocation */
-       for (i = 0; i < num_of_patterns; i++) {
-               size_t new_num_actions;
-               size_t cur_num_actions;
-               u32 nope_location;
-
-               cur_num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE;
-
-               mlx5hws_pat_calc_nope(pattern[i].data, cur_num_actions,
-                                     pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE,
-                                     &new_num_actions, &nope_location,
-                                     &new_pattern[i * pat_max_sz]);
-
-               action[i].modify_header.nope_locations = nope_location;
-               action[i].modify_header.num_of_actions = new_num_actions;
-
-               max_mh_actions = max(max_mh_actions, new_num_actions);
-       }
-
-       if (mlx5hws_arg_get_arg_log_size(max_mh_actions) >= MLX5HWS_ARG_CHUNK_SIZE_MAX) {
-               mlx5hws_err(ctx, "Num of actions (%d) bigger than allowed\n",
-                           max_mh_actions);
-               ret = -EINVAL;
-               goto free_new_pat;
-       }
-
-       /* Allocate single shared arg for all patterns based on the max size */
-       if (max_mh_actions > 1) {
-               ret = mlx5hws_arg_create_modify_header_arg(ctx,
-                                                          pattern->data,
-                                                          max_mh_actions,
-                                                          log_bulk_size,
-                                                          action->flags &
-                                                          MLX5HWS_ACTION_FLAG_SHARED,
-                                                          &arg_id);
-               if (ret)
-                       goto free_new_pat;
-       }
-
-       for (i = 0; i < num_of_patterns; i++) {
-               if (!mlx5hws_pat_verify_actions(ctx, pattern[i].data, pattern[i].sz)) {
-                       mlx5hws_err(ctx, "Fail to verify pattern modify actions\n");
-                       ret = -EINVAL;
-                       goto free_stc_and_pat;
-               }
-               num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE;
-               action[i].modify_header.num_of_patterns = num_of_patterns;
-               action[i].modify_header.max_num_of_actions = max_mh_actions;
-
-               action[i].modify_header.require_reparse =
-                       mlx5hws_pat_require_reparse(pattern[i].data, num_actions);
-
-               if (num_actions == 1) {
-                       pat_id = 0;
-                       /* Optimize single modify action to be used inline */
-                       action[i].modify_header.single_action = pattern[i].data[0];
-                       action[i].modify_header.single_action_type =
-                               MLX5_GET(set_action_in, pattern[i].data, action_type);
-               } else {
-                       /* Multiple modify actions require a pattern */
-                       if (unlikely(action[i].modify_header.nope_locations)) {
-                               size_t pattern_sz;
-
-                               pattern_sz = action[i].modify_header.num_of_actions *
-                                            MLX5HWS_MODIFY_ACTION_SIZE;
-                               ret =
-                               mlx5hws_pat_get_pattern(ctx,
-                                                       &new_pattern[i * pat_max_sz],
-                                                       pattern_sz, &pat_id);
-                       } else {
-                               ret = mlx5hws_pat_get_pattern(ctx,
-                                                             pattern[i].data,
-                                                             pattern[i].sz,
-                                                             &pat_id);
-                       }
-                       if (ret) {
-                               mlx5hws_err(ctx,
-                                           "Failed to allocate pattern for modify header\n");
-                               goto free_stc_and_pat;
-                       }
-
-                       action[i].modify_header.arg_id = arg_id;
-                       action[i].modify_header.pat_id = pat_id;
-               }
-               /* Allocate STC for each action representing a header */
-               ret = hws_action_create_stcs(&action[i], 0);
-               if (ret) {
-                       if (pat_id)
-                               mlx5hws_pat_put_pattern(ctx, pat_id);
-                       goto free_stc_and_pat;
-               }
-       }
-
-       kfree(new_pattern);
-       return 0;
-
-free_stc_and_pat:
-       while (i--) {
-               hws_action_destroy_stcs(&action[i]);
-               if (action[i].modify_header.pat_id)
-                       mlx5hws_pat_put_pattern(ctx, action[i].modify_header.pat_id);
-       }
-
-       if (arg_id)
-               mlx5hws_arg_destroy(ctx, arg_id);
-free_new_pat:
-       kfree(new_pattern);
-       return ret;
-}
-
-struct mlx5hws_action *
-mlx5hws_action_create_modify_header(struct mlx5hws_context *ctx,
-                                   u8 num_of_patterns,
-                                   struct mlx5hws_action_mh_pattern *patterns,
-                                   u32 log_bulk_size,
-                                   u32 flags)
-{
-       struct mlx5hws_action *action;
-       int ret;
-
-       if (!num_of_patterns) {
-               mlx5hws_err(ctx, "Invalid number of patterns\n");
-               return NULL;
-       }
-       action = hws_action_create_generic_bulk(ctx, flags,
-                                               MLX5HWS_ACTION_TYP_MODIFY_HDR,
-                                               num_of_patterns);
-       if (!action)
-               return NULL;
-
-       if ((flags & MLX5HWS_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_patterns > 1)) {
-               mlx5hws_err(ctx, "Action cannot be shared with requested pattern or size\n");
-               goto free_action;
-       }
-
-       ret = hws_action_create_modify_header_hws(action,
-                                                 num_of_patterns,
-                                                 patterns,
-                                                 log_bulk_size);
-       if (ret)
-               goto free_action;
-
-       return action;
-
-free_action:
-       kfree(action);
-       return NULL;
-}
-
-struct mlx5hws_action *
-mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
-                                size_t num_dest,
-                                struct mlx5hws_action_dest_attr *dests,
-                                bool ignore_flow_level,
-                                u32 flow_source,
-                                u32 flags)
-{
-       struct mlx5hws_cmd_set_fte_dest *dest_list = NULL;
-       struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
-       struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
-       struct mlx5hws_cmd_forward_tbl *fw_island;
-       struct mlx5hws_action *action;
-       u32 i /*, packet_reformat_id*/;
-       int ret;
-
-       if (num_dest <= 1) {
-               mlx5hws_err(ctx, "Action must have multiple dests\n");
-               return NULL;
-       }
-
-       if (flags == (MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED)) {
-               ft_attr.type = FS_FT_FDB;
-               ft_attr.level = ctx->caps->fdb_ft.max_level - 1;
-       } else {
-               mlx5hws_err(ctx, "Action flags not supported\n");
-               return NULL;
-       }
-
-       dest_list = kcalloc(num_dest, sizeof(*dest_list), GFP_KERNEL);
-       if (!dest_list)
-               return NULL;
-
-       for (i = 0; i < num_dest; i++) {
-               enum mlx5hws_action_type action_type = dests[i].dest->type;
-               struct mlx5hws_action *reformat_action = dests[i].reformat;
-
-               switch (action_type) {
-               case MLX5HWS_ACTION_TYP_TBL:
-                       dest_list[i].destination_type =
-                               MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-                       dest_list[i].destination_id = dests[i].dest->dest_obj.obj_id;
-                       fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-                       fte_attr.ignore_flow_level = ignore_flow_level;
-                       /* ToDo: In SW steering we have a handling of 'go to WIRE'
-                        * destination here by upper layer setting 'is_wire_ft' flag
-                        * if the destination is wire.
-                        * This is because uplink should be last dest in the list.
-                        */
-                       break;
-               case MLX5HWS_ACTION_TYP_VPORT:
-                       dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-                       dest_list[i].destination_id = dests[i].dest->vport.vport_num;
-                       fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-                       if (ctx->caps->merged_eswitch) {
-                               dest_list[i].ext_flags |=
-                                       MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID;
-                               dest_list[i].esw_owner_vhca_id =
-                                       dests[i].dest->vport.esw_owner_vhca_id;
-                       }
-                       break;
-               default:
-                       mlx5hws_err(ctx, "Unsupported action in dest_array\n");
-                       goto free_dest_list;
-               }
-
-               if (reformat_action) {
-                       mlx5hws_err(ctx, "dest_array with reformat action - unsupported\n");
-                       goto free_dest_list;
-               }
-       }
-
-       fte_attr.dests_num = num_dest;
-       fte_attr.dests = dest_list;
-
-       fw_island = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
-       if (!fw_island)
-               goto free_dest_list;
-
-       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_DEST_ARRAY);
-       if (!action)
-               goto destroy_fw_island;
-
-       ret = hws_action_create_stcs(action, fw_island->ft_id);
-       if (ret)
-               goto free_action;
-
-       action->dest_array.fw_island = fw_island;
-       action->dest_array.num_dest = num_dest;
-       action->dest_array.dest_list = dest_list;
-
-       return action;
-
-free_action:
-       kfree(action);
-destroy_fw_island:
-       mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, fw_island);
-free_dest_list:
-       for (i = 0; i < num_dest; i++) {
-               if (dest_list[i].ext_reformat_id)
-                       mlx5hws_cmd_packet_reformat_destroy(ctx->mdev,
-                                                           dest_list[i].ext_reformat_id);
-       }
-       kfree(dest_list);
-       return NULL;
-}
-
-struct mlx5hws_action *
-mlx5hws_action_create_insert_header(struct mlx5hws_context *ctx,
-                                   u8 num_of_hdrs,
-                                   struct mlx5hws_action_insert_header *hdrs,
-                                   u32 log_bulk_size,
-                                   u32 flags)
-{
-       struct mlx5hws_action_reformat_header *reformat_hdrs;
-       struct mlx5hws_action *action;
-       int ret;
-       int i;
-
-       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_INSERT_HEADER);
-       if (!action)
-               return NULL;
-
-       reformat_hdrs = kcalloc(num_of_hdrs, sizeof(*reformat_hdrs), GFP_KERNEL);
-       if (!reformat_hdrs)
-               goto free_action;
-
-       for (i = 0; i < num_of_hdrs; i++) {
-               if (hdrs[i].offset % W_SIZE != 0) {
-                       mlx5hws_err(ctx, "Header offset should be in WORD granularity\n");
-                       goto free_reformat_hdrs;
-               }
-
-               action[i].reformat.anchor = hdrs[i].anchor;
-               action[i].reformat.encap = hdrs[i].encap;
-               action[i].reformat.offset = hdrs[i].offset;
-
-               reformat_hdrs[i].sz = hdrs[i].hdr.sz;
-               reformat_hdrs[i].data = hdrs[i].hdr.data;
-       }
-
-       ret = hws_action_handle_insert_with_ptr(action, num_of_hdrs,
-                                               reformat_hdrs, log_bulk_size);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to create HWS reformat action\n");
-               goto free_reformat_hdrs;
-       }
-
-       kfree(reformat_hdrs);
-
-       return action;
-
-free_reformat_hdrs:
-       kfree(reformat_hdrs);
-free_action:
-       kfree(action);
-       return NULL;
-}
-
-struct mlx5hws_action *
-mlx5hws_action_create_remove_header(struct mlx5hws_context *ctx,
-                                   struct mlx5hws_action_remove_header_attr *attr,
-                                   u32 flags)
-{
-       struct mlx5hws_action *action;
-
-       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_REMOVE_HEADER);
-       if (!action)
-               return NULL;
-
-       /* support only remove anchor with size */
-       if (attr->size % W_SIZE != 0) {
-               mlx5hws_err(ctx,
-                           "Invalid size, HW supports header remove in WORD granularity\n");
-               goto free_action;
-       }
-
-       if (attr->size > MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE) {
-               mlx5hws_err(ctx, "Header removal size limited to %u bytes\n",
-                           MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE);
-               goto free_action;
-       }
-
-       action->remove_header.anchor = attr->anchor;
-       action->remove_header.size = attr->size / W_SIZE;
-
-       if (hws_action_create_stcs(action, 0))
-               goto free_action;
-
-       return action;
-
-free_action:
-       kfree(action);
-       return NULL;
-}
-
-static struct mlx5hws_definer *
-hws_action_create_dest_match_range_definer(struct mlx5hws_context *ctx)
-{
-       struct mlx5hws_definer *definer;
-       __be32 *tag;
-       int ret;
-
-       definer = kzalloc(sizeof(*definer), GFP_KERNEL);
-       if (!definer)
-               return NULL;
-
-       definer->dw_selector[0] = MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN / 4;
-       /* Set DW0 tag mask */
-       tag = (__force __be32 *)definer->mask.jumbo;
-       tag[MLX5HWS_RULE_JUMBO_MATCH_TAG_OFFSET_DW0] = htonl(0xffffUL << 16);
-
-       mutex_lock(&ctx->ctrl_lock);
-
-       ret = mlx5hws_definer_get_obj(ctx, definer);
-       if (ret < 0) {
-               mutex_unlock(&ctx->ctrl_lock);
-               kfree(definer);
-               return NULL;
-       }
-
-       mutex_unlock(&ctx->ctrl_lock);
-       definer->obj_id = ret;
-
-       return definer;
-}
-
-static struct mlx5hws_matcher_action_ste *
-hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
-                                        struct mlx5hws_definer *definer,
-                                        u32 miss_ft_id)
-{
-       struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
-       struct mlx5hws_action_default_stc *default_stc;
-       struct mlx5hws_matcher_action_ste *table_ste;
-       struct mlx5hws_pool_attr pool_attr = {0};
-       struct mlx5hws_pool *ste_pool, *stc_pool;
-       struct mlx5hws_pool_chunk *ste;
-       u32 *rtc_0_id, *rtc_1_id;
-       u32 obj_id;
-       int ret;
-
-       /* Check if STE range is supported */
-       if (!IS_BIT_SET(ctx->caps->supp_ste_format_gen_wqe, MLX5_IFC_RTC_STE_FORMAT_RANGE)) {
-               mlx5hws_err(ctx, "Range STE format not supported\n");
-               return NULL;
-       }
-
-       table_ste = kzalloc(sizeof(*table_ste), GFP_KERNEL);
-       if (!table_ste)
-               return NULL;
-
-       mutex_lock(&ctx->ctrl_lock);
-
-       pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
-       pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
-       pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
-       pool_attr.alloc_log_sz = 1;
-       table_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
-       if (!table_ste->pool) {
-               mlx5hws_err(ctx, "Failed to allocate memory ste pool\n");
-               goto free_ste;
-       }
-
-       /* Allocate RTC */
-       rtc_0_id = &table_ste->rtc_0_id;
-       rtc_1_id = &table_ste->rtc_1_id;
-       ste_pool = table_ste->pool;
-       ste = &table_ste->ste;
-       ste->order = 1;
-
-       rtc_attr.log_size = 0;
-       rtc_attr.log_depth = 0;
-       rtc_attr.miss_ft_id = miss_ft_id;
-       rtc_attr.num_hash_definer = 1;
-       rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
-       rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
-       rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
-       rtc_attr.fw_gen_wqe = true;
-       rtc_attr.is_scnd_range = true;
-
-       obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
-
-       rtc_attr.pd = ctx->pd_num;
-       rtc_attr.ste_base = obj_id;
-       rtc_attr.ste_offset = ste->offset;
-       rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
-       rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, false);
-
-       /* STC is a single resource (obj_id), use any STC for the ID */
-       stc_pool = ctx->stc_pool[MLX5HWS_TABLE_TYPE_FDB];
-       default_stc = ctx->common_res[MLX5HWS_TABLE_TYPE_FDB].default_stc;
-       obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
-       rtc_attr.stc_base = obj_id;
-
-       ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to create RTC");
-               goto pool_destroy;
-       }
-
-       /* Create mirror RTC */
-       obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
-       rtc_attr.ste_base = obj_id;
-       rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, true);
-
-       obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
-       rtc_attr.stc_base = obj_id;
-
-       ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to create mirror RTC");
-               goto destroy_rtc_0;
-       }
-
-       mutex_unlock(&ctx->ctrl_lock);
-
-       return table_ste;
-
-destroy_rtc_0:
-       mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id);
-pool_destroy:
-       mlx5hws_pool_destroy(table_ste->pool);
-free_ste:
-       mutex_unlock(&ctx->ctrl_lock);
-       kfree(table_ste);
-       return NULL;
-}
-
-static void
-hws_action_destroy_dest_match_range_table(struct mlx5hws_context *ctx,
-                                         struct mlx5hws_matcher_action_ste *table_ste)
-{
-       mutex_lock(&ctx->ctrl_lock);
-
-       mlx5hws_cmd_rtc_destroy(ctx->mdev, table_ste->rtc_1_id);
-       mlx5hws_cmd_rtc_destroy(ctx->mdev, table_ste->rtc_0_id);
-       mlx5hws_pool_destroy(table_ste->pool);
-       kfree(table_ste);
-
-       mutex_unlock(&ctx->ctrl_lock);
-}
-
-static int
-hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
-                                             struct mlx5hws_matcher_action_ste *table_ste,
-                                             struct mlx5hws_action *hit_ft_action,
-                                             struct mlx5hws_definer *range_definer,
-                                             u32 min, u32 max)
-{
-       struct mlx5hws_wqe_gta_data_seg_ste match_wqe_data = {0};
-       struct mlx5hws_wqe_gta_data_seg_ste range_wqe_data = {0};
-       struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0};
-       u32 no_use, used_rtc_0_id, used_rtc_1_id, ret;
-       struct mlx5hws_context_common_res *common_res;
-       struct mlx5hws_send_ste_attr ste_attr = {0};
-       struct mlx5hws_send_engine *queue;
-       __be32 *wqe_data_arr;
-
-       mutex_lock(&ctx->ctrl_lock);
-
-       /* Get the control queue */
-       queue = &ctx->send_queue[ctx->queues - 1];
-       if (unlikely(mlx5hws_send_engine_err(queue))) {
-               ret = -EIO;
-               goto error;
-       }
-
-       /* Init default send STE attributes */
-       ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
-       ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
-       ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
-       ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
-       ste_attr.send_attr.user_data = &no_use;
-       ste_attr.send_attr.rule = NULL;
-       ste_attr.send_attr.fence = 1;
-       ste_attr.send_attr.notify_hw = true;
-       ste_attr.rtc_0 = table_ste->rtc_0_id;
-       ste_attr.rtc_1 = table_ste->rtc_1_id;
-       ste_attr.used_id_rtc_0 = &used_rtc_0_id;
-       ste_attr.used_id_rtc_1 = &used_rtc_1_id;
-
-       common_res = &ctx->common_res[MLX5HWS_TABLE_TYPE_FDB];
-
-       /* init an empty match STE which will always hit */
-       ste_attr.wqe_ctrl = &wqe_ctrl;
-       ste_attr.wqe_data = &match_wqe_data;
-       ste_attr.send_attr.match_definer_id = ctx->caps->trivial_match_definer;
-
-       /* Fill WQE control data */
-       wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] =
-               htonl(common_res->default_stc->nop_ctr.offset);
-       wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
-               htonl(common_res->default_stc->nop_dw5.offset);
-       wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] =
-               htonl(common_res->default_stc->nop_dw6.offset);
-       wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] =
-               htonl(common_res->default_stc->nop_dw7.offset);
-       wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
-               htonl(MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 << 29);
-       wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
-               htonl(hit_ft_action->stc[MLX5HWS_TABLE_TYPE_FDB].offset);
-
-       wqe_data_arr = (__force __be32 *)&range_wqe_data;
-
-       ste_attr.range_wqe_data = &range_wqe_data;
-       ste_attr.send_attr.len += MLX5HWS_WQE_SZ_GTA_DATA;
-       ste_attr.send_attr.range_definer_id = mlx5hws_definer_get_id(range_definer);
-
-       /* Fill range matching fields,
-        * min/max_value_2 corresponds to match_dw_0 in its definer,
-        * min_value_2 sets in DW0 in the STE and max_value_2 sets in DW1 in the STE.
-        */
-       wqe_data_arr[MLX5HWS_MATCHER_OFFSET_TAG_DW0] = htonl(min << 16);
-       wqe_data_arr[MLX5HWS_MATCHER_OFFSET_TAG_DW1] = htonl(max << 16);
-
-       /* Send WQEs to FW */
-       mlx5hws_send_stes_fw(ctx, queue, &ste_attr);
-
-       /* Poll for completion */
-       ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1,
-                                       MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to drain control queue");
-               goto error;
-       }
-
-       mutex_unlock(&ctx->ctrl_lock);
-
-       return 0;
-
-error:
-       mutex_unlock(&ctx->ctrl_lock);
-       return ret;
-}
-
-struct mlx5hws_action *
-mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
-                                      u32 field,
-                                      struct mlx5_flow_table *hit_ft,
-                                      struct mlx5_flow_table *miss_ft,
-                                      u32 min, u32 max, u32 flags)
-{
-       struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
-       struct mlx5hws_matcher_action_ste *table_ste;
-       struct mlx5hws_action *hit_ft_action;
-       struct mlx5hws_definer *definer;
-       struct mlx5hws_action *action;
-       u32 miss_ft_id = miss_ft->id;
-       u32 hit_ft_id = hit_ft->id;
-       int ret;
-
-       if (field != MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN ||
-           min > 0xffff || max > 0xffff) {
-               mlx5hws_err(ctx, "Invalid match range parameters\n");
-               return NULL;
-       }
-
-       action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_RANGE);
-       if (!action)
-               return NULL;
-
-       definer = hws_action_create_dest_match_range_definer(ctx);
-       if (!definer)
-               goto free_action;
-
-       table_ste = hws_action_create_dest_match_range_table(ctx, definer, miss_ft_id);
-       if (!table_ste)
-               goto destroy_definer;
-
-       hit_ft_action = mlx5hws_action_create_dest_table_num(ctx, hit_ft_id, flags);
-       if (!hit_ft_action)
-               goto destroy_table_ste;
-
-       ret = hws_action_create_dest_match_range_fill_table(ctx, table_ste,
-                                                           hit_ft_action,
-                                                           definer, min, max);
-       if (ret)
-               goto destroy_hit_ft_action;
-
-       action->range.table_ste = table_ste;
-       action->range.definer = definer;
-       action->range.hit_ft_action = hit_ft_action;
-
-       /* Allocate STC for jumps to STE */
-       mutex_lock(&ctx->ctrl_lock);
-       stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
-       stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
-       stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
-       stc_attr.ste_table.ste = table_ste->ste;
-       stc_attr.ste_table.ste_pool = table_ste->pool;
-       stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
-
-       ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, MLX5HWS_TABLE_TYPE_FDB,
-                                             &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
-       if (ret)
-               goto error_unlock;
-
-       mutex_unlock(&ctx->ctrl_lock);
-
-       return action;
-
-error_unlock:
-       mutex_unlock(&ctx->ctrl_lock);
-destroy_hit_ft_action:
-       mlx5hws_action_destroy(hit_ft_action);
-destroy_table_ste:
-       hws_action_destroy_dest_match_range_table(ctx, table_ste);
-destroy_definer:
-       mlx5hws_definer_free(ctx, definer);
-free_action:
-       kfree(action);
-       mlx5hws_err(ctx, "Failed to create action dest match range");
-       return NULL;
-}
-
-struct mlx5hws_action *
-mlx5hws_action_create_last(struct mlx5hws_context *ctx, u32 flags)
-{
-       return hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_LAST);
-}
-
-struct mlx5hws_action *
-mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx,
-                                  u32 sampler_id, u32 flags)
-{
-       mlx5hws_err(ctx, "Flow sampler action - unsupported\n");
-       return NULL;
-}
-
-static void hws_action_destroy_hws(struct mlx5hws_action *action)
-{
-       u32 ext_reformat_id;
-       bool shared_arg;
-       u32 obj_id;
-       u32 i;
-
-       switch (action->type) {
-       case MLX5HWS_ACTION_TYP_MISS:
-       case MLX5HWS_ACTION_TYP_TAG:
-       case MLX5HWS_ACTION_TYP_DROP:
-       case MLX5HWS_ACTION_TYP_CTR:
-       case MLX5HWS_ACTION_TYP_TBL:
-       case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
-       case MLX5HWS_ACTION_TYP_ASO_METER:
-       case MLX5HWS_ACTION_TYP_PUSH_VLAN:
-       case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
-       case MLX5HWS_ACTION_TYP_VPORT:
-               hws_action_destroy_stcs(action);
-               break;
-       case MLX5HWS_ACTION_TYP_POP_VLAN:
-               hws_action_destroy_stcs(action);
-               hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
-               break;
-       case MLX5HWS_ACTION_TYP_DEST_ARRAY:
-               hws_action_destroy_stcs(action);
-               mlx5hws_cmd_forward_tbl_destroy(action->ctx->mdev, action->dest_array.fw_island);
-               for (i = 0; i < action->dest_array.num_dest; i++) {
-                       ext_reformat_id = action->dest_array.dest_list[i].ext_reformat_id;
-                       if (ext_reformat_id)
-                               mlx5hws_cmd_packet_reformat_destroy(action->ctx->mdev,
-                                                                   ext_reformat_id);
-               }
-               kfree(action->dest_array.dest_list);
-               break;
-       case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
-       case MLX5HWS_ACTION_TYP_MODIFY_HDR:
-               shared_arg = false;
-               for (i = 0; i < action->modify_header.num_of_patterns; i++) {
-                       hws_action_destroy_stcs(&action[i]);
-                       if (action[i].modify_header.num_of_actions > 1) {
-                               mlx5hws_pat_put_pattern(action[i].ctx,
-                                                       action[i].modify_header.pat_id);
-                               /* Save shared arg object to be freed after */
-                               obj_id = action[i].modify_header.arg_id;
-                               shared_arg = true;
-                       }
-               }
-               if (shared_arg)
-                       mlx5hws_arg_destroy(action->ctx, obj_id);
-               break;
-       case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
-               hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
-               for (i = 0; i < action->reformat.num_of_hdrs; i++)
-                       hws_action_destroy_stcs(&action[i]);
-               mlx5hws_arg_destroy(action->ctx, action->reformat.arg_id);
-               break;
-       case MLX5HWS_ACTION_TYP_INSERT_HEADER:
-       case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
-               for (i = 0; i < action->reformat.num_of_hdrs; i++)
-                       hws_action_destroy_stcs(&action[i]);
-               mlx5hws_arg_destroy(action->ctx, action->reformat.arg_id);
-               break;
-       case MLX5HWS_ACTION_TYP_RANGE:
-               hws_action_destroy_stcs(action);
-               hws_action_destroy_dest_match_range_table(action->ctx, action->range.table_ste);
-               mlx5hws_definer_free(action->ctx, action->range.definer);
-               mlx5hws_action_destroy(action->range.hit_ft_action);
-               break;
-       case MLX5HWS_ACTION_TYP_LAST:
-               break;
-       default:
-               pr_warn("HWS: Invalid action type: %d\n", action->type);
-       }
-}
-
-int mlx5hws_action_destroy(struct mlx5hws_action *action)
-{
-       hws_action_destroy_hws(action);
-
-       kfree(action);
-       return 0;
-}
-
-int mlx5hws_action_get_default_stc(struct mlx5hws_context *ctx, u8 tbl_type)
-__must_hold(&ctx->ctrl_lock)
-{
-       struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
-       struct mlx5hws_action_default_stc *default_stc;
-       int ret;
-
-       if (ctx->common_res[tbl_type].default_stc) {
-               ctx->common_res[tbl_type].default_stc->refcount++;
-               return 0;
-       }
-
-       default_stc = kzalloc(sizeof(*default_stc), GFP_KERNEL);
-       if (!default_stc)
-               return -ENOMEM;
-
-       stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_NOP;
-       stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW0;
-       stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
-       ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
-                                             &default_stc->nop_ctr);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to allocate default counter STC\n");
-               goto free_default_stc;
-       }
-
-       stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
-       ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
-                                             &default_stc->nop_dw5);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to allocate default NOP DW5 STC\n");
-               goto free_nop_ctr;
-       }
-
-       stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW6;
-       ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
-                                             &default_stc->nop_dw6);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to allocate default NOP DW6 STC\n");
-               goto free_nop_dw5;
-       }
-
-       stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW7;
-       ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
-                                             &default_stc->nop_dw7);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to allocate default NOP DW7 STC\n");
-               goto free_nop_dw6;
-       }
-
-       stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
-       stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW;
-
-       ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
-                                             &default_stc->default_hit);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to allocate default allow STC\n");
-               goto free_nop_dw7;
-       }
-
-       ctx->common_res[tbl_type].default_stc = default_stc;
-       ctx->common_res[tbl_type].default_stc->refcount++;
-
-       return 0;
-
-free_nop_dw7:
-       mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7);
-free_nop_dw6:
-       mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);
-free_nop_dw5:
-       mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
-free_nop_ctr:
-       mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
-free_default_stc:
-       kfree(default_stc);
-       return ret;
-}
-
-void mlx5hws_action_put_default_stc(struct mlx5hws_context *ctx, u8 tbl_type)
-__must_hold(&ctx->ctrl_lock)
-{
-       struct mlx5hws_action_default_stc *default_stc;
-
-       default_stc = ctx->common_res[tbl_type].default_stc;
-
-       default_stc = ctx->common_res[tbl_type].default_stc;
-       if (--default_stc->refcount)
-               return;
-
-       mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->default_hit);
-       mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7);
-       mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);
-       mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
-       mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
-       kfree(default_stc);
-       ctx->common_res[tbl_type].default_stc = NULL;
-}
-
-static void hws_action_modify_write(struct mlx5hws_send_engine *queue,
-                                   u32 arg_idx,
-                                   u8 *arg_data,
-                                   u16 num_of_actions,
-                                   u32 nope_locations)
-{
-       u8 *new_arg_data = NULL;
-       int i, j;
-
-       if (unlikely(nope_locations)) {
-               new_arg_data = kcalloc(num_of_actions,
-                                      MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
-               if (unlikely(!new_arg_data))
-                       return;
-
-               for (i = 0, j = 0; i < num_of_actions; i++, j++) {
-                       memcpy(&new_arg_data[j], arg_data, MLX5HWS_MODIFY_ACTION_SIZE);
-                       if (BIT(i) & nope_locations)
-                               j++;
-               }
-       }
-
-       mlx5hws_arg_write(queue, NULL, arg_idx,
-                         new_arg_data ? new_arg_data : arg_data,
-                         num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE);
-
-       kfree(new_arg_data);
-}
-
-void mlx5hws_action_prepare_decap_l3_data(u8 *src, u8 *dst, u16 num_of_actions)
-{
-       u8 *e_src;
-       int i;
-
-       /* num_of_actions = remove l3l2 + 4/5 inserts + remove extra 2 bytes
-        * copy from end of src to the start of dst.
-        * move to the end, 2 is the leftover from 14B or 18B
-        */
-       if (num_of_actions == DECAP_L3_NUM_ACTIONS_W_NO_VLAN)
-               e_src = src + MLX5HWS_ACTION_HDR_LEN_L2;
-       else
-               e_src = src + MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN;
-
-       /* Move dst over the first remove action + zero data */
-       dst += MLX5HWS_ACTION_DOUBLE_SIZE;
-       /* Move dst over the first insert ctrl action */
-       dst += MLX5HWS_ACTION_DOUBLE_SIZE / 2;
-       /* Actions:
-        * no vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.
-        * with vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.
-        * the loop is without the last insertion.
-        */
-       for (i = 0; i < num_of_actions - 3; i++) {
-               e_src -= MLX5HWS_ACTION_INLINE_DATA_SIZE;
-               memcpy(dst, e_src, MLX5HWS_ACTION_INLINE_DATA_SIZE); /* data */
-               dst += MLX5HWS_ACTION_DOUBLE_SIZE;
-       }
-       /* Copy the last 2 bytes after a gap of 2 bytes which will be removed */
-       e_src -= MLX5HWS_ACTION_INLINE_DATA_SIZE / 2;
-       dst += MLX5HWS_ACTION_INLINE_DATA_SIZE / 2;
-       memcpy(dst, e_src, 2);
-}
-
-static int
-hws_action_get_shared_stc_offset(struct mlx5hws_context_common_res *common_res,
-                                enum mlx5hws_context_shared_stc_type stc_type)
-{
-       return common_res->shared_stc[stc_type]->stc_chunk.offset;
-}
-
-static struct mlx5hws_actions_wqe_setter *
-hws_action_setter_find_first(struct mlx5hws_actions_wqe_setter *setter,
-                            u8 req_flags)
-{
-       /* Use a new setter if requested flags are taken */
-       while (setter->flags & req_flags)
-               setter++;
-
-       /* Use current setter in required flags are not used */
-       return setter;
-}
-
-static void
-hws_action_apply_stc(struct mlx5hws_actions_apply_data *apply,
-                    enum mlx5hws_action_stc_idx stc_idx,
-                    u8 action_idx)
-{
-       struct mlx5hws_action *action = apply->rule_action[action_idx].action;
-
-       apply->wqe_ctrl->stc_ix[stc_idx] =
-               htonl(action->stc[apply->tbl_type].offset);
-}
-
-static void
-hws_action_setter_push_vlan(struct mlx5hws_actions_apply_data *apply,
-                           struct mlx5hws_actions_wqe_setter *setter)
-{
-       struct mlx5hws_rule_action *rule_action;
-
-       rule_action = &apply->rule_action[setter->idx_double];
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = rule_action->push_vlan.vlan_hdr;
-
-       hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW6, setter->idx_double);
-       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
-}
-
-static void
-hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
-                               struct mlx5hws_actions_wqe_setter *setter)
-{
-       struct mlx5hws_rule_action *rule_action;
-       struct mlx5hws_action *action;
-       u32 arg_sz, arg_idx;
-       u8 *single_action;
-       __be32 stc_idx;
-
-       rule_action = &apply->rule_action[setter->idx_double];
-       action = rule_action->action;
-
-       stc_idx = htonl(action->stc[apply->tbl_type].offset);
-       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
-       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
-
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
-
-       if (action->modify_header.num_of_actions == 1) {
-               if (action->modify_header.single_action_type ==
-                   MLX5_MODIFICATION_TYPE_COPY ||
-                   action->modify_header.single_action_type ==
-                   MLX5_MODIFICATION_TYPE_ADD_FIELD) {
-                       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
-                       return;
-               }
-
-               if (action->flags & MLX5HWS_ACTION_FLAG_SHARED)
-                       single_action = (u8 *)&action->modify_header.single_action;
-               else
-                       single_action = rule_action->modify_header.data;
-
-               apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] =
-                       *(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data);
-       } else {
-               /* Argument offset multiple with number of args per these actions */
-               arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
-               arg_idx = rule_action->modify_header.offset * arg_sz;
-
-               apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
-
-               if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
-                       apply->require_dep = 1;
-                       hws_action_modify_write(apply->queue,
-                                               action->modify_header.arg_id + arg_idx,
-                                               rule_action->modify_header.data,
-                                               action->modify_header.num_of_actions,
-                                               action->modify_header.nope_locations);
-               }
-       }
-}
-
-static void
-hws_action_setter_insert_ptr(struct mlx5hws_actions_apply_data *apply,
-                            struct mlx5hws_actions_wqe_setter *setter)
-{
-       struct mlx5hws_rule_action *rule_action;
-       struct mlx5hws_action *action;
-       u32 arg_idx, arg_sz;
-       __be32 stc_idx;
-
-       rule_action = &apply->rule_action[setter->idx_double];
-       action = rule_action->action + rule_action->reformat.hdr_idx;
-
-       /* Argument offset multiple on args required for header size */
-       arg_sz = mlx5hws_arg_data_size_to_arg_size(action->reformat.max_hdr_sz);
-       arg_idx = rule_action->reformat.offset * arg_sz;
-
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
-
-       stc_idx = htonl(action->stc[apply->tbl_type].offset);
-       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
-       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
-
-       if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
-               apply->require_dep = 1;
-               mlx5hws_arg_write(apply->queue, NULL,
-                                 action->reformat.arg_id + arg_idx,
-                                 rule_action->reformat.data,
-                                 action->reformat.header_size);
-       }
-}
-
-static void
-hws_action_setter_tnl_l3_to_l2(struct mlx5hws_actions_apply_data *apply,
-                              struct mlx5hws_actions_wqe_setter *setter)
-{
-       struct mlx5hws_rule_action *rule_action;
-       struct mlx5hws_action *action;
-       u32 arg_sz, arg_idx;
-       __be32 stc_idx;
-
-       rule_action = &apply->rule_action[setter->idx_double];
-       action = rule_action->action + rule_action->reformat.hdr_idx;
-
-       /* Argument offset multiple on args required for num of actions */
-       arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
-       arg_idx = rule_action->reformat.offset * arg_sz;
-
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
-
-       stc_idx = htonl(action->stc[apply->tbl_type].offset);
-       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
-       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
-
-       if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
-               apply->require_dep = 1;
-               mlx5hws_arg_decapl3_write(apply->queue,
-                                         action->modify_header.arg_id + arg_idx,
-                                         rule_action->reformat.data,
-                                         action->modify_header.num_of_actions);
-       }
-}
-
-static void
-hws_action_setter_aso(struct mlx5hws_actions_apply_data *apply,
-                     struct mlx5hws_actions_wqe_setter *setter)
-{
-       struct mlx5hws_rule_action *rule_action;
-       u32 exe_aso_ctrl;
-       u32 offset;
-
-       rule_action = &apply->rule_action[setter->idx_double];
-
-       switch (rule_action->action->type) {
-       case MLX5HWS_ACTION_TYP_ASO_METER:
-               /* exe_aso_ctrl format:
-                * [STC only and reserved bits 29b][init_color 2b][meter_id 1b]
-                */
-               offset = rule_action->aso_meter.offset / MLX5_ASO_METER_NUM_PER_OBJ;
-               exe_aso_ctrl = rule_action->aso_meter.offset % MLX5_ASO_METER_NUM_PER_OBJ;
-               exe_aso_ctrl |= rule_action->aso_meter.init_color <<
-                               MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET;
-               break;
-       default:
-               mlx5hws_err(rule_action->action->ctx,
-                           "Unsupported ASO action type: %d\n", rule_action->action->type);
-               return;
-       }
-
-       /* aso_object_offset format: [24B] */
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = htonl(offset);
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(exe_aso_ctrl);
-
-       hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW6, setter->idx_double);
-       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
-}
-
-static void
-hws_action_setter_tag(struct mlx5hws_actions_apply_data *apply,
-                     struct mlx5hws_actions_wqe_setter *setter)
-{
-       struct mlx5hws_rule_action *rule_action;
-
-       rule_action = &apply->rule_action[setter->idx_single];
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = htonl(rule_action->tag.value);
-       hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW5, setter->idx_single);
-}
-
-static void
-hws_action_setter_ctrl_ctr(struct mlx5hws_actions_apply_data *apply,
-                          struct mlx5hws_actions_wqe_setter *setter)
-{
-       struct mlx5hws_rule_action *rule_action;
-
-       rule_action = &apply->rule_action[setter->idx_ctr];
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW0] = htonl(rule_action->counter.offset);
-       hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_CTRL, setter->idx_ctr);
-}
-
-static void
-hws_action_setter_single(struct mlx5hws_actions_apply_data *apply,
-                        struct mlx5hws_actions_wqe_setter *setter)
-{
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
-       hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW5, setter->idx_single);
-}
-
-static void
-hws_action_setter_single_double_pop(struct mlx5hws_actions_apply_data *apply,
-                                   struct mlx5hws_actions_wqe_setter *setter)
-{
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
-       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
-               htonl(hws_action_get_shared_stc_offset(apply->common_res,
-                                                      MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP));
-}
-
-static void
-hws_action_setter_hit(struct mlx5hws_actions_apply_data *apply,
-                     struct mlx5hws_actions_wqe_setter *setter)
-{
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
-       hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_HIT, setter->idx_hit);
-}
-
-static void
-hws_action_setter_default_hit(struct mlx5hws_actions_apply_data *apply,
-                             struct mlx5hws_actions_wqe_setter *setter)
-{
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
-       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
-               htonl(apply->common_res->default_stc->default_hit.offset);
-}
-
-static void
-hws_action_setter_hit_next_action(struct mlx5hws_actions_apply_data *apply,
-                                 struct mlx5hws_actions_wqe_setter *setter)
-{
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = htonl(apply->next_direct_idx << 6);
-       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] = htonl(apply->jump_to_action_stc);
-}
-
-static void
-hws_action_setter_common_decap(struct mlx5hws_actions_apply_data *apply,
-                              struct mlx5hws_actions_wqe_setter *setter)
-{
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
-       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
-               htonl(hws_action_get_shared_stc_offset(apply->common_res,
-                                                      MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3));
-}
-
-static void
-hws_action_setter_range(struct mlx5hws_actions_apply_data *apply,
-                       struct mlx5hws_actions_wqe_setter *setter)
-{
-       /* Always jump to index zero */
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
-       hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_HIT, setter->idx_hit);
-}
-
-int mlx5hws_action_template_process(struct mlx5hws_action_template *at)
-{
-       struct mlx5hws_actions_wqe_setter *start_setter = at->setters + 1;
-       enum mlx5hws_action_type *action_type = at->action_type_arr;
-       struct mlx5hws_actions_wqe_setter *setter = at->setters;
-       struct mlx5hws_actions_wqe_setter *pop_setter = NULL;
-       struct mlx5hws_actions_wqe_setter *last_setter;
-       int i;
-
-       /* Note: Given action combination must be valid */
-
-       /* Check if action were already processed */
-       if (at->num_of_action_stes)
-               return 0;
-
-       for (i = 0; i < MLX5HWS_ACTION_MAX_STE; i++)
-               setter[i].set_hit = &hws_action_setter_hit_next_action;
-
-       /* The same action template setters can be used with jumbo or match
-        * STE, to support both cases we reserve the first setter for cases
-        * with jumbo STE to allow jump to the first action STE.
-        * This extra setter can be reduced in some cases on rule creation.
-        */
-       setter = start_setter;
-       last_setter = start_setter;
-
-       for (i = 0; i < at->num_actions; i++) {
-               switch (action_type[i]) {
-               case MLX5HWS_ACTION_TYP_DROP:
-               case MLX5HWS_ACTION_TYP_TBL:
-               case MLX5HWS_ACTION_TYP_DEST_ARRAY:
-               case MLX5HWS_ACTION_TYP_VPORT:
-               case MLX5HWS_ACTION_TYP_MISS:
-                       /* Hit action */
-                       last_setter->flags |= ASF_HIT;
-                       last_setter->set_hit = &hws_action_setter_hit;
-                       last_setter->idx_hit = i;
-                       break;
-
-               case MLX5HWS_ACTION_TYP_RANGE:
-                       last_setter->flags |= ASF_HIT;
-                       last_setter->set_hit = &hws_action_setter_range;
-                       last_setter->idx_hit = i;
-                       break;
-
-               case MLX5HWS_ACTION_TYP_POP_VLAN:
-                       /* Single remove header to header */
-                       if (pop_setter) {
-                               /* We have 2 pops, use the shared */
-                               pop_setter->set_single = &hws_action_setter_single_double_pop;
-                               break;
-                       }
-                       setter = hws_action_setter_find_first(last_setter,
-                                                             ASF_SINGLE1 | ASF_MODIFY |
-                                                             ASF_INSERT);
-                       setter->flags |= ASF_SINGLE1 | ASF_REMOVE;
-                       setter->set_single = &hws_action_setter_single;
-                       setter->idx_single = i;
-                       pop_setter = setter;
-                       break;
-
-               case MLX5HWS_ACTION_TYP_PUSH_VLAN:
-                       /* Double insert inline */
-                       setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
-                       setter->flags |= ASF_DOUBLE | ASF_INSERT;
-                       setter->set_double = &hws_action_setter_push_vlan;
-                       setter->idx_double = i;
-                       break;
-
-               case MLX5HWS_ACTION_TYP_MODIFY_HDR:
-                       /* Double modify header list */
-                       setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
-                       setter->flags |= ASF_DOUBLE | ASF_MODIFY;
-                       setter->set_double = &hws_action_setter_modify_header;
-                       setter->idx_double = i;
-                       break;
-
-               case MLX5HWS_ACTION_TYP_ASO_METER:
-                       /* Double ASO action */
-                       setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE);
-                       setter->flags |= ASF_DOUBLE;
-                       setter->set_double = &hws_action_setter_aso;
-                       setter->idx_double = i;
-                       break;
-
-               case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
-               case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
-                       /* Single remove header to header */
-                       setter = hws_action_setter_find_first(last_setter,
-                                                             ASF_SINGLE1 | ASF_MODIFY);
-                       setter->flags |= ASF_SINGLE1 | ASF_REMOVE;
-                       setter->set_single = &hws_action_setter_single;
-                       setter->idx_single = i;
-                       break;
-
-               case MLX5HWS_ACTION_TYP_INSERT_HEADER:
-               case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
-                       /* Double insert header with pointer */
-                       setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
-                       setter->flags |= ASF_DOUBLE | ASF_INSERT;
-                       setter->set_double = &hws_action_setter_insert_ptr;
-                       setter->idx_double = i;
-                       break;
-
-               case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
-                       /* Single remove + Double insert header with pointer */
-                       setter = hws_action_setter_find_first(last_setter,
-                                                             ASF_SINGLE1 | ASF_DOUBLE);
-                       setter->flags |= ASF_SINGLE1 | ASF_DOUBLE;
-                       setter->set_double = &hws_action_setter_insert_ptr;
-                       setter->idx_double = i;
-                       setter->set_single = &hws_action_setter_common_decap;
-                       setter->idx_single = i;
-                       break;
-
-               case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
-                       /* Double modify header list with remove and push inline */
-                       setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
-                       setter->flags |= ASF_DOUBLE | ASF_MODIFY | ASF_INSERT;
-                       setter->set_double = &hws_action_setter_tnl_l3_to_l2;
-                       setter->idx_double = i;
-                       break;
-
-               case MLX5HWS_ACTION_TYP_TAG:
-                       /* Single TAG action, search for any room from the start */
-                       setter = hws_action_setter_find_first(start_setter, ASF_SINGLE1);
-                       setter->flags |= ASF_SINGLE1;
-                       setter->set_single = &hws_action_setter_tag;
-                       setter->idx_single = i;
-                       break;
-
-               case MLX5HWS_ACTION_TYP_CTR:
-                       /* Control counter action
-                        * TODO: Current counter executed first. Support is needed
-                        *       for single ation counter action which is done last.
-                        *       Example: Decap + CTR
-                        */
-                       setter = hws_action_setter_find_first(start_setter, ASF_CTR);
-                       setter->flags |= ASF_CTR;
-                       setter->set_ctr = &hws_action_setter_ctrl_ctr;
-                       setter->idx_ctr = i;
-                       break;
-               default:
-                       pr_warn("HWS: Invalid action type in processingaction template: action_type[%d]=%d\n",
-                               i, action_type[i]);
-                       return -EOPNOTSUPP;
-               }
-
-               last_setter = max(setter, last_setter);
-       }
-
-       /* Set default hit on the last STE if no hit action provided */
-       if (!(last_setter->flags & ASF_HIT))
-               last_setter->set_hit = &hws_action_setter_default_hit;
-
-       at->num_of_action_stes = last_setter - start_setter + 1;
-
-       /* Check if action template doesn't require any action DWs */
-       at->only_term = (at->num_of_action_stes == 1) &&
-               !(last_setter->flags & ~(ASF_CTR | ASF_HIT));
-
-       return 0;
-}
-
-struct mlx5hws_action_template *
-mlx5hws_action_template_create(enum mlx5hws_action_type action_type[])
-{
-       struct mlx5hws_action_template *at;
-       u8 num_actions = 0;
-       int i;
-
-       at = kzalloc(sizeof(*at), GFP_KERNEL);
-       if (!at)
-               return NULL;
-
-       while (action_type[num_actions++] != MLX5HWS_ACTION_TYP_LAST)
-               ;
-
-       at->num_actions = num_actions - 1;
-       at->action_type_arr = kcalloc(num_actions, sizeof(*action_type), GFP_KERNEL);
-       if (!at->action_type_arr)
-               goto free_at;
-
-       for (i = 0; i < num_actions; i++)
-               at->action_type_arr[i] = action_type[i];
-
-       return at;
-
-free_at:
-       kfree(at);
-       return NULL;
-}
-
-int mlx5hws_action_template_destroy(struct mlx5hws_action_template *at)
-{
-       kfree(at->action_type_arr);
-       kfree(at);
-       return 0;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h
deleted file mode 100644 (file)
index bf5c1b2..0000000
+++ /dev/null
@@ -1,307 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#ifndef MLX5HWS_ACTION_H_
-#define MLX5HWS_ACTION_H_
-
-/* Max number of STEs needed for a rule (including match) */
-#define MLX5HWS_ACTION_MAX_STE 20
-
-/* Max number of internal subactions of ipv6_ext */
-#define MLX5HWS_ACTION_IPV6_EXT_MAX_SA 4
-
-enum mlx5hws_action_stc_idx {
-       MLX5HWS_ACTION_STC_IDX_CTRL = 0,
-       MLX5HWS_ACTION_STC_IDX_HIT = 1,
-       MLX5HWS_ACTION_STC_IDX_DW5 = 2,
-       MLX5HWS_ACTION_STC_IDX_DW6 = 3,
-       MLX5HWS_ACTION_STC_IDX_DW7 = 4,
-       MLX5HWS_ACTION_STC_IDX_MAX = 5,
-       /* STC Jumvo STE combo: CTR, Hit */
-       MLX5HWS_ACTION_STC_IDX_LAST_JUMBO_STE = 1,
-       /* STC combo1: CTR, SINGLE, DOUBLE, Hit */
-       MLX5HWS_ACTION_STC_IDX_LAST_COMBO1 = 3,
-       /* STC combo2: CTR, 3 x SINGLE, Hit */
-       MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 = 4,
-       /* STC combo2: CTR, TRIPLE, Hit */
-       MLX5HWS_ACTION_STC_IDX_LAST_COMBO3 = 2,
-};
-
-enum mlx5hws_action_offset {
-       MLX5HWS_ACTION_OFFSET_DW0 = 0,
-       MLX5HWS_ACTION_OFFSET_DW5 = 5,
-       MLX5HWS_ACTION_OFFSET_DW6 = 6,
-       MLX5HWS_ACTION_OFFSET_DW7 = 7,
-       MLX5HWS_ACTION_OFFSET_HIT = 3,
-       MLX5HWS_ACTION_OFFSET_HIT_LSB = 4,
-};
-
-enum {
-       MLX5HWS_ACTION_DOUBLE_SIZE = 8,
-       MLX5HWS_ACTION_INLINE_DATA_SIZE = 4,
-       MLX5HWS_ACTION_HDR_LEN_L2_MACS = 12,
-       MLX5HWS_ACTION_HDR_LEN_L2_VLAN = 4,
-       MLX5HWS_ACTION_HDR_LEN_L2_ETHER = 2,
-       MLX5HWS_ACTION_HDR_LEN_L2 = (MLX5HWS_ACTION_HDR_LEN_L2_MACS +
-                                    MLX5HWS_ACTION_HDR_LEN_L2_ETHER),
-       MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN = (MLX5HWS_ACTION_HDR_LEN_L2 +
-                                           MLX5HWS_ACTION_HDR_LEN_L2_VLAN),
-       MLX5HWS_ACTION_REFORMAT_DATA_SIZE = 64,
-       DECAP_L3_NUM_ACTIONS_W_NO_VLAN = 6,
-       DECAP_L3_NUM_ACTIONS_W_VLAN = 7,
-};
-
-enum mlx5hws_action_setter_flag {
-       ASF_SINGLE1 = 1 << 0,
-       ASF_SINGLE2 = 1 << 1,
-       ASF_SINGLE3 = 1 << 2,
-       ASF_DOUBLE = ASF_SINGLE2 | ASF_SINGLE3,
-       ASF_TRIPLE = ASF_SINGLE1 | ASF_DOUBLE,
-       ASF_INSERT = 1 << 3,
-       ASF_REMOVE = 1 << 4,
-       ASF_MODIFY = 1 << 5,
-       ASF_CTR = 1 << 6,
-       ASF_HIT = 1 << 7,
-};
-
-struct mlx5hws_action_default_stc {
-       struct mlx5hws_pool_chunk nop_ctr;
-       struct mlx5hws_pool_chunk nop_dw5;
-       struct mlx5hws_pool_chunk nop_dw6;
-       struct mlx5hws_pool_chunk nop_dw7;
-       struct mlx5hws_pool_chunk default_hit;
-       u32 refcount;
-};
-
-struct mlx5hws_action_shared_stc {
-       struct mlx5hws_pool_chunk stc_chunk;
-       u32 refcount;
-};
-
-struct mlx5hws_actions_apply_data {
-       struct mlx5hws_send_engine *queue;
-       struct mlx5hws_rule_action *rule_action;
-       __be32 *wqe_data;
-       struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
-       u32 jump_to_action_stc;
-       struct mlx5hws_context_common_res *common_res;
-       enum mlx5hws_table_type tbl_type;
-       u32 next_direct_idx;
-       u8 require_dep;
-};
-
-struct mlx5hws_actions_wqe_setter;
-
-typedef void (*mlx5hws_action_setter_fp)(struct mlx5hws_actions_apply_data *apply,
-                                        struct mlx5hws_actions_wqe_setter *setter);
-
-struct mlx5hws_actions_wqe_setter {
-       mlx5hws_action_setter_fp set_single;
-       mlx5hws_action_setter_fp set_double;
-       mlx5hws_action_setter_fp set_triple;
-       mlx5hws_action_setter_fp set_hit;
-       mlx5hws_action_setter_fp set_ctr;
-       u8 idx_single;
-       u8 idx_double;
-       u8 idx_triple;
-       u8 idx_ctr;
-       u8 idx_hit;
-       u8 stage_idx;
-       u8 flags;
-};
-
-struct mlx5hws_action_template {
-       struct mlx5hws_actions_wqe_setter setters[MLX5HWS_ACTION_MAX_STE];
-       enum mlx5hws_action_type *action_type_arr;
-       u8 num_of_action_stes;
-       u8 num_actions;
-       u8 only_term;
-};
-
-struct mlx5hws_action {
-       u8 type;
-       u8 flags;
-       struct mlx5hws_context *ctx;
-       union {
-               struct {
-                       struct mlx5hws_pool_chunk stc[MLX5HWS_TABLE_TYPE_MAX];
-                       union {
-                               struct {
-                                       u32 pat_id;
-                                       u32 arg_id;
-                                       __be64 single_action;
-                                       u32 nope_locations;
-                                       u8 num_of_patterns;
-                                       u8 single_action_type;
-                                       u8 num_of_actions;
-                                       u8 max_num_of_actions;
-                                       u8 require_reparse;
-                               } modify_header;
-                               struct {
-                                       u32 arg_id;
-                                       u32 header_size;
-                                       u16 max_hdr_sz;
-                                       u8 num_of_hdrs;
-                                       u8 anchor;
-                                       u8 e_anchor;
-                                       u8 offset;
-                                       bool encap;
-                                       u8 require_reparse;
-                               } reformat;
-                               struct {
-                                       u32 obj_id;
-                                       u8 return_reg_id;
-                               } aso;
-                               struct {
-                                       u16 vport_num;
-                                       u16 esw_owner_vhca_id;
-                                       bool esw_owner_vhca_id_valid;
-                               } vport;
-                               struct {
-                                       u32 obj_id;
-                               } dest_obj;
-                               struct {
-                                       struct mlx5hws_cmd_forward_tbl *fw_island;
-                                       size_t num_dest;
-                                       struct mlx5hws_cmd_set_fte_dest *dest_list;
-                               } dest_array;
-                               struct {
-                                       u8 type;
-                                       u8 start_anchor;
-                                       u8 end_anchor;
-                                       u8 num_of_words;
-                                       bool decap;
-                               } insert_hdr;
-                               struct {
-                                       /* PRM start anchor from which header will be removed */
-                                       u8 anchor;
-                                       /* Header remove offset in bytes, from the start
-                                        * anchor to the location where remove header starts.
-                                        */
-                                       u8 offset;
-                                       /* Indicates the removed header size in bytes */
-                                       size_t size;
-                               } remove_header;
-                               struct {
-                                       struct mlx5hws_matcher_action_ste *table_ste;
-                                       struct mlx5hws_action *hit_ft_action;
-                                       struct mlx5hws_definer *definer;
-                               } range;
-                       };
-               };
-
-               struct ibv_flow_action *flow_action;
-               u32 obj_id;
-               struct ibv_qp *qp;
-       };
-};
-
-const char *mlx5hws_action_type_to_str(enum mlx5hws_action_type action_type);
-
-int mlx5hws_action_get_default_stc(struct mlx5hws_context *ctx,
-                                  u8 tbl_type);
-
-void mlx5hws_action_put_default_stc(struct mlx5hws_context *ctx,
-                                   u8 tbl_type);
-
-void mlx5hws_action_prepare_decap_l3_data(u8 *src, u8 *dst,
-                                         u16 num_of_actions);
-
-int mlx5hws_action_template_process(struct mlx5hws_action_template *at);
-
-bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx,
-                               enum mlx5hws_action_type *user_actions,
-                               enum mlx5hws_table_type table_type);
-
-int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx,
-                                   struct mlx5hws_cmd_stc_modify_attr *stc_attr,
-                                   u32 table_type,
-                                   struct mlx5hws_pool_chunk *stc);
-
-void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx,
-                                   u32 table_type,
-                                   struct mlx5hws_pool_chunk *stc);
-
-static inline void
-mlx5hws_action_setter_default_single(struct mlx5hws_actions_apply_data *apply,
-                                    struct mlx5hws_actions_wqe_setter *setter)
-{
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
-       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
-               htonl(apply->common_res->default_stc->nop_dw5.offset);
-}
-
-static inline void
-mlx5hws_action_setter_default_double(struct mlx5hws_actions_apply_data *apply,
-                                    struct mlx5hws_actions_wqe_setter *setter)
-{
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
-       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] =
-               htonl(apply->common_res->default_stc->nop_dw6.offset);
-       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] =
-               htonl(apply->common_res->default_stc->nop_dw7.offset);
-}
-
-static inline void
-mlx5hws_action_setter_default_ctr(struct mlx5hws_actions_apply_data *apply,
-                                 struct mlx5hws_actions_wqe_setter *setter)
-{
-       apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW0] = 0;
-       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] =
-               htonl(apply->common_res->default_stc->nop_ctr.offset);
-}
-
-static inline void
-mlx5hws_action_apply_setter(struct mlx5hws_actions_apply_data *apply,
-                           struct mlx5hws_actions_wqe_setter *setter,
-                           bool is_jumbo)
-{
-       u8 num_of_actions;
-
-       /* Set control counter */
-       if (setter->set_ctr)
-               setter->set_ctr(apply, setter);
-       else
-               mlx5hws_action_setter_default_ctr(apply, setter);
-
-       if (!is_jumbo) {
-               if (unlikely(setter->set_triple)) {
-                       /* Set triple on match */
-                       setter->set_triple(apply, setter);
-                       num_of_actions = MLX5HWS_ACTION_STC_IDX_LAST_COMBO3;
-               } else {
-                       /* Set single and double on match */
-                       if (setter->set_single)
-                               setter->set_single(apply, setter);
-                       else
-                               mlx5hws_action_setter_default_single(apply, setter);
-
-                       if (setter->set_double)
-                               setter->set_double(apply, setter);
-                       else
-                               mlx5hws_action_setter_default_double(apply, setter);
-
-                       num_of_actions = setter->set_double ?
-                               MLX5HWS_ACTION_STC_IDX_LAST_COMBO1 :
-                               MLX5HWS_ACTION_STC_IDX_LAST_COMBO2;
-               }
-       } else {
-               apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
-               apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
-               apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
-               apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] = 0;
-               apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = 0;
-               apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
-               num_of_actions = MLX5HWS_ACTION_STC_IDX_LAST_JUMBO_STE;
-       }
-
-       /* Set next/final hit action */
-       setter->set_hit(apply, setter);
-
-       /* Set number of actions */
-       apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
-               htonl(num_of_actions << 29);
-}
-
-#endif /* MLX5HWS_ACTION_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c
deleted file mode 100644 (file)
index e6ed662..0000000
+++ /dev/null
@@ -1,149 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#include "mlx5hws_internal.h"
-#include "mlx5hws_buddy.h"
-
-static int hws_buddy_init(struct mlx5hws_buddy_mem *buddy, u32 max_order)
-{
-       int i, s, ret = 0;
-
-       buddy->max_order = max_order;
-
-       buddy->bitmap = kcalloc(buddy->max_order + 1,
-                               sizeof(*buddy->bitmap),
-                               GFP_KERNEL);
-       if (!buddy->bitmap)
-               return -ENOMEM;
-
-       buddy->num_free = kcalloc(buddy->max_order + 1,
-                                 sizeof(*buddy->num_free),
-                                 GFP_KERNEL);
-       if (!buddy->num_free) {
-               ret = -ENOMEM;
-               goto err_out_free_bits;
-       }
-
-       for (i = 0; i <= (int)buddy->max_order; ++i) {
-               s = 1 << (buddy->max_order - i);
-
-               buddy->bitmap[i] = bitmap_zalloc(s, GFP_KERNEL);
-               if (!buddy->bitmap[i]) {
-                       ret = -ENOMEM;
-                       goto err_out_free_num_free;
-               }
-       }
-
-       bitmap_set(buddy->bitmap[buddy->max_order], 0, 1);
-       buddy->num_free[buddy->max_order] = 1;
-
-       return 0;
-
-err_out_free_num_free:
-       for (i = 0; i <= (int)buddy->max_order; ++i)
-               bitmap_free(buddy->bitmap[i]);
-
-       kfree(buddy->num_free);
-
-err_out_free_bits:
-       kfree(buddy->bitmap);
-       return ret;
-}
-
-struct mlx5hws_buddy_mem *mlx5hws_buddy_create(u32 max_order)
-{
-       struct mlx5hws_buddy_mem *buddy;
-
-       buddy = kzalloc(sizeof(*buddy), GFP_KERNEL);
-       if (!buddy)
-               return NULL;
-
-       if (hws_buddy_init(buddy, max_order))
-               goto free_buddy;
-
-       return buddy;
-
-free_buddy:
-       kfree(buddy);
-       return NULL;
-}
-
-void mlx5hws_buddy_cleanup(struct mlx5hws_buddy_mem *buddy)
-{
-       int i;
-
-       for (i = 0; i <= (int)buddy->max_order; ++i)
-               bitmap_free(buddy->bitmap[i]);
-
-       kfree(buddy->num_free);
-       kfree(buddy->bitmap);
-}
-
-static int hws_buddy_find_free_seg(struct mlx5hws_buddy_mem *buddy,
-                                  u32 start_order,
-                                  u32 *segment,
-                                  u32 *order)
-{
-       unsigned int seg, order_iter, m;
-
-       for (order_iter = start_order;
-            order_iter <= buddy->max_order; ++order_iter) {
-               if (!buddy->num_free[order_iter])
-                       continue;
-
-               m = 1 << (buddy->max_order - order_iter);
-               seg = find_first_bit(buddy->bitmap[order_iter], m);
-
-               if (WARN(seg >= m,
-                        "ICM Buddy: failed finding free mem for order %d\n",
-                        order_iter))
-                       return -ENOMEM;
-
-               break;
-       }
-
-       if (order_iter > buddy->max_order)
-               return -ENOMEM;
-
-       *segment = seg;
-       *order = order_iter;
-       return 0;
-}
-
-int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order)
-{
-       u32 seg, order_iter, err;
-
-       err = hws_buddy_find_free_seg(buddy, order, &seg, &order_iter);
-       if (err)
-               return err;
-
-       bitmap_clear(buddy->bitmap[order_iter], seg, 1);
-       --buddy->num_free[order_iter];
-
-       while (order_iter > order) {
-               --order_iter;
-               seg <<= 1;
-               bitmap_set(buddy->bitmap[order_iter], seg ^ 1, 1);
-               ++buddy->num_free[order_iter];
-       }
-
-       seg <<= order;
-
-       return seg;
-}
-
-void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order)
-{
-       seg >>= order;
-
-       while (test_bit(seg ^ 1, buddy->bitmap[order])) {
-               bitmap_clear(buddy->bitmap[order], seg ^ 1, 1);
-               --buddy->num_free[order];
-               seg >>= 1;
-               ++order;
-       }
-
-       bitmap_set(buddy->bitmap[order], seg, 1);
-       ++buddy->num_free[order];
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h
deleted file mode 100644 (file)
index 338c44b..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#ifndef MLX5HWS_BUDDY_H_
-#define MLX5HWS_BUDDY_H_
-
-struct mlx5hws_buddy_mem {
-       unsigned long **bitmap;
-       unsigned int *num_free;
-       u32 max_order;
-};
-
-struct mlx5hws_buddy_mem *mlx5hws_buddy_create(u32 max_order);
-
-void mlx5hws_buddy_cleanup(struct mlx5hws_buddy_mem *buddy);
-
-int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order);
-
-void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order);
-
-#endif /* MLX5HWS_BUDDY_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c
deleted file mode 100644 (file)
index 8f3a6f9..0000000
+++ /dev/null
@@ -1,995 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#include "mlx5hws_internal.h"
-
-static u16 hws_bwc_gen_queue_idx(struct mlx5hws_context *ctx)
-{
-       /* assign random queue */
-       return get_random_u8() % mlx5hws_bwc_queues(ctx);
-}
-
-static u16
-hws_bwc_get_burst_th(struct mlx5hws_context *ctx, u16 queue_id)
-{
-       return min(ctx->send_queue[queue_id].num_entries / 2,
-                  MLX5HWS_BWC_MATCHER_REHASH_BURST_TH);
-}
-
-static struct mutex *
-hws_bwc_get_queue_lock(struct mlx5hws_context *ctx, u16 idx)
-{
-       return &ctx->bwc_send_queue_locks[idx];
-}
-
-static void hws_bwc_lock_all_queues(struct mlx5hws_context *ctx)
-{
-       u16 bwc_queues = mlx5hws_bwc_queues(ctx);
-       struct mutex *queue_lock; /* Protect the queue */
-       int i;
-
-       for (i = 0; i < bwc_queues; i++) {
-               queue_lock = hws_bwc_get_queue_lock(ctx, i);
-               mutex_lock(queue_lock);
-       }
-}
-
-static void hws_bwc_unlock_all_queues(struct mlx5hws_context *ctx)
-{
-       u16 bwc_queues = mlx5hws_bwc_queues(ctx);
-       struct mutex *queue_lock; /* Protect the queue */
-       int i = bwc_queues;
-
-       while (i--) {
-               queue_lock = hws_bwc_get_queue_lock(ctx, i);
-               mutex_unlock(queue_lock);
-       }
-}
-
-static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr,
-                                     u32 priority,
-                                     u8 size_log)
-{
-       memset(attr, 0, sizeof(*attr));
-
-       attr->priority = priority;
-       attr->optimize_using_rule_idx = 0;
-       attr->mode = MLX5HWS_MATCHER_RESOURCE_MODE_RULE;
-       attr->optimize_flow_src = MLX5HWS_MATCHER_FLOW_SRC_ANY;
-       attr->insert_mode = MLX5HWS_MATCHER_INSERT_BY_HASH;
-       attr->distribute_mode = MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH;
-       attr->rule.num_log = size_log;
-       attr->resizable = true;
-       attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
-}
-
-int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
-                                     struct mlx5hws_table *table,
-                                     u32 priority,
-                                     u8 match_criteria_enable,
-                                     struct mlx5hws_match_parameters *mask,
-                                     enum mlx5hws_action_type action_types[])
-{
-       enum mlx5hws_action_type init_action_types[1] = { MLX5HWS_ACTION_TYP_LAST };
-       struct mlx5hws_context *ctx = table->ctx;
-       u16 bwc_queues = mlx5hws_bwc_queues(ctx);
-       struct mlx5hws_matcher_attr attr = {0};
-       int i;
-
-       bwc_matcher->rules = kcalloc(bwc_queues, sizeof(*bwc_matcher->rules), GFP_KERNEL);
-       if (!bwc_matcher->rules)
-               goto err;
-
-       for (i = 0; i < bwc_queues; i++)
-               INIT_LIST_HEAD(&bwc_matcher->rules[i]);
-
-       hws_bwc_matcher_init_attr(&attr,
-                                 priority,
-                                 MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG);
-
-       bwc_matcher->priority = priority;
-       bwc_matcher->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
-
-       /* create dummy action template */
-       bwc_matcher->at[0] =
-               mlx5hws_action_template_create(action_types ?
-                                              action_types : init_action_types);
-       if (!bwc_matcher->at[0]) {
-               mlx5hws_err(table->ctx, "BWC matcher: failed creating action template\n");
-               goto free_bwc_matcher_rules;
-       }
-
-       bwc_matcher->num_of_at = 1;
-
-       bwc_matcher->mt = mlx5hws_match_template_create(ctx,
-                                                       mask->match_buf,
-                                                       mask->match_sz,
-                                                       match_criteria_enable);
-       if (!bwc_matcher->mt) {
-               mlx5hws_err(table->ctx, "BWC matcher: failed creating match template\n");
-               goto free_at;
-       }
-
-       bwc_matcher->matcher = mlx5hws_matcher_create(table,
-                                                     &bwc_matcher->mt, 1,
-                                                     &bwc_matcher->at[0],
-                                                     bwc_matcher->num_of_at,
-                                                     &attr);
-       if (!bwc_matcher->matcher) {
-               mlx5hws_err(table->ctx, "BWC matcher: failed creating HWS matcher\n");
-               goto free_mt;
-       }
-
-       return 0;
-
-free_mt:
-       mlx5hws_match_template_destroy(bwc_matcher->mt);
-free_at:
-       mlx5hws_action_template_destroy(bwc_matcher->at[0]);
-free_bwc_matcher_rules:
-       kfree(bwc_matcher->rules);
-err:
-       return -EINVAL;
-}
-
-struct mlx5hws_bwc_matcher *
-mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
-                          u32 priority,
-                          u8 match_criteria_enable,
-                          struct mlx5hws_match_parameters *mask)
-{
-       struct mlx5hws_bwc_matcher *bwc_matcher;
-       bool is_complex;
-       int ret;
-
-       if (!mlx5hws_context_bwc_supported(table->ctx)) {
-               mlx5hws_err(table->ctx,
-                           "BWC matcher: context created w/o BWC API compatibility\n");
-               return NULL;
-       }
-
-       bwc_matcher = kzalloc(sizeof(*bwc_matcher), GFP_KERNEL);
-       if (!bwc_matcher)
-               return NULL;
-
-       /* Check if the required match params can be all matched
-        * in single STE, otherwise complex matcher is needed.
-        */
-
-       is_complex = mlx5hws_bwc_match_params_is_complex(table->ctx, match_criteria_enable, mask);
-       if (is_complex)
-               ret = mlx5hws_bwc_matcher_create_complex(bwc_matcher,
-                                                        table,
-                                                        priority,
-                                                        match_criteria_enable,
-                                                        mask);
-       else
-               ret = mlx5hws_bwc_matcher_create_simple(bwc_matcher,
-                                                       table,
-                                                       priority,
-                                                       match_criteria_enable,
-                                                       mask,
-                                                       NULL);
-       if (ret)
-               goto free_bwc_matcher;
-
-       return bwc_matcher;
-
-free_bwc_matcher:
-       kfree(bwc_matcher);
-
-       return NULL;
-}
-
-int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
-       int i;
-
-       mlx5hws_matcher_destroy(bwc_matcher->matcher);
-       bwc_matcher->matcher = NULL;
-
-       for (i = 0; i < bwc_matcher->num_of_at; i++)
-               mlx5hws_action_template_destroy(bwc_matcher->at[i]);
-
-       mlx5hws_match_template_destroy(bwc_matcher->mt);
-       kfree(bwc_matcher->rules);
-
-       return 0;
-}
-
-int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
-       if (bwc_matcher->num_of_rules)
-               mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
-                           "BWC matcher destroy: matcher still has %d rules\n",
-                           bwc_matcher->num_of_rules);
-
-       mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
-
-       kfree(bwc_matcher);
-       return 0;
-}
-
-static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
-                             u16 queue_id,
-                             u32 *pending_rules,
-                             bool drain)
-{
-       struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH];
-       u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id);
-       bool got_comp = *pending_rules >= burst_th;
-       bool queue_full;
-       int err = 0;
-       int ret;
-       int i;
-
-       /* Check if there are any completions at all */
-       if (!got_comp && !drain)
-               return 0;
-
-       queue_full = mlx5hws_send_engine_full(&ctx->send_queue[queue_id]);
-       while (queue_full || ((got_comp || drain) && *pending_rules)) {
-               ret = mlx5hws_send_queue_poll(ctx, queue_id, comp, burst_th);
-               if (unlikely(ret < 0)) {
-                       mlx5hws_err(ctx, "BWC poll error: polling queue %d returned %d\n",
-                                   queue_id, ret);
-                       return -EINVAL;
-               }
-
-               if (ret) {
-                       (*pending_rules) -= ret;
-                       for (i = 0; i < ret; i++) {
-                               if (unlikely(comp[i].status != MLX5HWS_FLOW_OP_SUCCESS)) {
-                                       mlx5hws_err(ctx,
-                                                   "BWC poll error: polling queue %d returned completion with error\n",
-                                                   queue_id);
-                                       err = -EINVAL;
-                               }
-                       }
-                       queue_full = false;
-               }
-
-               got_comp = !!ret;
-       }
-
-       return err;
-}
-
-void
-mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
-                          u16 bwc_queue_idx,
-                          u32 flow_source,
-                          struct mlx5hws_rule_attr *rule_attr)
-{
-       struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
-
-       /* no use of INSERT_BY_INDEX in bwc rule */
-       rule_attr->rule_idx = 0;
-
-       /* notify HW at each rule insertion/deletion */
-       rule_attr->burst = 0;
-
-       /* We don't need user data, but the API requires it to exist */
-       rule_attr->user_data = (void *)0xFACADE;
-
-       rule_attr->queue_id = mlx5hws_bwc_get_queue_id(ctx, bwc_queue_idx);
-       rule_attr->flow_source = flow_source;
-}
-
-struct mlx5hws_bwc_rule *
-mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
-       struct mlx5hws_bwc_rule *bwc_rule;
-
-       bwc_rule = kzalloc(sizeof(*bwc_rule), GFP_KERNEL);
-       if (unlikely(!bwc_rule))
-               goto out_err;
-
-       bwc_rule->rule = kzalloc(sizeof(*bwc_rule->rule), GFP_KERNEL);
-       if (unlikely(!bwc_rule->rule))
-               goto free_rule;
-
-       bwc_rule->bwc_matcher = bwc_matcher;
-       return bwc_rule;
-
-free_rule:
-       kfree(bwc_rule);
-out_err:
-       return NULL;
-}
-
-void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule)
-{
-       if (likely(bwc_rule->rule))
-               kfree(bwc_rule->rule);
-       kfree(bwc_rule);
-}
-
-static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx)
-{
-       struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
-
-       bwc_matcher->num_of_rules++;
-       bwc_rule->bwc_queue_idx = idx;
-       list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]);
-}
-
-static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule)
-{
-       struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
-
-       bwc_matcher->num_of_rules--;
-       list_del_init(&bwc_rule->list_node);
-}
-
-static int
-hws_bwc_rule_destroy_hws_async(struct mlx5hws_bwc_rule *bwc_rule,
-                              struct mlx5hws_rule_attr *attr)
-{
-       return mlx5hws_rule_destroy(bwc_rule->rule, attr);
-}
-
-static int
-hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
-                             struct mlx5hws_rule_attr *rule_attr)
-{
-       struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
-       struct mlx5hws_flow_op_result completion;
-       int ret;
-
-       ret = hws_bwc_rule_destroy_hws_async(bwc_rule, rule_attr);
-       if (unlikely(ret))
-               return ret;
-
-       do {
-               ret = mlx5hws_send_queue_poll(ctx, rule_attr->queue_id, &completion, 1);
-       } while (ret != 1);
-
-       if (unlikely(completion.status != MLX5HWS_FLOW_OP_SUCCESS ||
-                    (bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
-                     bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING))) {
-               mlx5hws_err(ctx, "Failed destroying BWC rule: completion %d, rule status %d\n",
-                           completion.status, bwc_rule->rule->status);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
-{
-       struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
-       struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
-       u16 idx = bwc_rule->bwc_queue_idx;
-       struct mlx5hws_rule_attr attr;
-       struct mutex *queue_lock; /* Protect the queue */
-       int ret;
-
-       mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &attr);
-
-       queue_lock = hws_bwc_get_queue_lock(ctx, idx);
-
-       mutex_lock(queue_lock);
-
-       ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &attr);
-       hws_bwc_rule_list_remove(bwc_rule);
-
-       mutex_unlock(queue_lock);
-
-       return ret;
-}
-
-int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule)
-{
-       int ret;
-
-       ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
-
-       mlx5hws_bwc_rule_free(bwc_rule);
-       return ret;
-}
-
-static int
-hws_bwc_rule_create_async(struct mlx5hws_bwc_rule *bwc_rule,
-                         u32 *match_param,
-                         u8 at_idx,
-                         struct mlx5hws_rule_action rule_actions[],
-                         struct mlx5hws_rule_attr *rule_attr)
-{
-       return mlx5hws_rule_create(bwc_rule->bwc_matcher->matcher,
-                                  0, /* only one match template supported */
-                                  match_param,
-                                  at_idx,
-                                  rule_actions,
-                                  rule_attr,
-                                  bwc_rule->rule);
-}
-
-static int
-hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule *bwc_rule,
-                        u32 *match_param,
-                        u8 at_idx,
-                        struct mlx5hws_rule_action rule_actions[],
-                        struct mlx5hws_rule_attr *rule_attr)
-
-{
-       struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
-       u32 expected_completions = 1;
-       int ret;
-
-       ret = hws_bwc_rule_create_async(bwc_rule, match_param,
-                                       at_idx, rule_actions,
-                                       rule_attr);
-       if (unlikely(ret))
-               return ret;
-
-       ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
-
-       return ret;
-}
-
-static int
-hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule,
-                        u8 at_idx,
-                        struct mlx5hws_rule_action rule_actions[],
-                        struct mlx5hws_rule_attr *rule_attr)
-{
-       struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
-       struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
-       u32 expected_completions = 1;
-       int ret;
-
-       ret = mlx5hws_rule_action_update(bwc_rule->rule,
-                                        at_idx,
-                                        rule_actions,
-                                        rule_attr);
-       if (unlikely(ret))
-               return ret;
-
-       ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
-       if (unlikely(ret))
-               mlx5hws_err(ctx, "Failed updating BWC rule (%d)\n", ret);
-
-       return ret;
-}
-
-static bool
-hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
-       struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
-
-       return bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH >=
-              caps->ste_alloc_log_max - 1;
-}
-
-static bool
-hws_bwc_matcher_rehash_size_needed(struct mlx5hws_bwc_matcher *bwc_matcher,
-                                  u32 num_of_rules)
-{
-       if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher)))
-               return false;
-
-       if (unlikely((num_of_rules * 100 / MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH) >=
-                    (1UL << bwc_matcher->size_log)))
-               return true;
-
-       return false;
-}
-
-static void
-hws_bwc_rule_actions_to_action_types(struct mlx5hws_rule_action rule_actions[],
-                                    enum mlx5hws_action_type action_types[])
-{
-       int i = 0;
-
-       for (i = 0;
-            rule_actions[i].action && (rule_actions[i].action->type != MLX5HWS_ACTION_TYP_LAST);
-            i++) {
-               action_types[i] = (enum mlx5hws_action_type)rule_actions[i].action->type;
-       }
-
-       action_types[i] = MLX5HWS_ACTION_TYP_LAST;
-}
-
-static int
-hws_bwc_matcher_extend_at(struct mlx5hws_bwc_matcher *bwc_matcher,
-                         struct mlx5hws_rule_action rule_actions[])
-{
-       enum mlx5hws_action_type action_types[MLX5HWS_BWC_MAX_ACTS];
-
-       hws_bwc_rule_actions_to_action_types(rule_actions, action_types);
-
-       bwc_matcher->at[bwc_matcher->num_of_at] =
-               mlx5hws_action_template_create(action_types);
-
-       if (unlikely(!bwc_matcher->at[bwc_matcher->num_of_at]))
-               return -ENOMEM;
-
-       bwc_matcher->num_of_at++;
-       return 0;
-}
-
-static int
-hws_bwc_matcher_extend_size(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
-       struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
-       struct mlx5hws_cmd_query_caps *caps = ctx->caps;
-
-       if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher))) {
-               mlx5hws_err(ctx, "Can't resize matcher: depth exceeds limit %d\n",
-                           caps->rtc_log_depth_max);
-               return -ENOMEM;
-       }
-
-       bwc_matcher->size_log =
-               min(bwc_matcher->size_log + MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
-                   caps->ste_alloc_log_max - MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH);
-
-       return 0;
-}
-
-static int
-hws_bwc_matcher_find_at(struct mlx5hws_bwc_matcher *bwc_matcher,
-                       struct mlx5hws_rule_action rule_actions[])
-{
-       enum mlx5hws_action_type *action_type_arr;
-       int i, j;
-
-       /* start from index 1 - first action template is a dummy */
-       for (i = 1; i < bwc_matcher->num_of_at; i++) {
-               j = 0;
-               action_type_arr = bwc_matcher->at[i]->action_type_arr;
-
-               while (rule_actions[j].action &&
-                      rule_actions[j].action->type != MLX5HWS_ACTION_TYP_LAST) {
-                       if (action_type_arr[j] != rule_actions[j].action->type)
-                               break;
-                       j++;
-               }
-
-               if (action_type_arr[j] == MLX5HWS_ACTION_TYP_LAST &&
-                   (!rule_actions[j].action ||
-                    rule_actions[j].action->type == MLX5HWS_ACTION_TYP_LAST))
-                       return i;
-       }
-
-       return -1;
-}
-
-static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
-       struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
-       u16 bwc_queues = mlx5hws_bwc_queues(ctx);
-       struct mlx5hws_bwc_rule **bwc_rules;
-       struct mlx5hws_rule_attr rule_attr;
-       u32 *pending_rules;
-       int i, j, ret = 0;
-       bool all_done;
-       u16 burst_th;
-
-       mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
-
-       pending_rules = kcalloc(bwc_queues, sizeof(*pending_rules), GFP_KERNEL);
-       if (!pending_rules)
-               return -ENOMEM;
-
-       bwc_rules = kcalloc(bwc_queues, sizeof(*bwc_rules), GFP_KERNEL);
-       if (!bwc_rules) {
-               ret = -ENOMEM;
-               goto free_pending_rules;
-       }
-
-       for (i = 0; i < bwc_queues; i++) {
-               if (list_empty(&bwc_matcher->rules[i]))
-                       bwc_rules[i] = NULL;
-               else
-                       bwc_rules[i] = list_first_entry(&bwc_matcher->rules[i],
-                                                       struct mlx5hws_bwc_rule,
-                                                       list_node);
-       }
-
-       do {
-               all_done = true;
-
-               for (i = 0; i < bwc_queues; i++) {
-                       rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
-                       burst_th = hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
-
-                       for (j = 0; j < burst_th && bwc_rules[i]; j++) {
-                               rule_attr.burst = !!((j + 1) % burst_th);
-                               ret = mlx5hws_matcher_resize_rule_move(bwc_matcher->matcher,
-                                                                      bwc_rules[i]->rule,
-                                                                      &rule_attr);
-                               if (unlikely(ret)) {
-                                       mlx5hws_err(ctx,
-                                                   "Moving BWC rule failed during rehash (%d)\n",
-                                                   ret);
-                                       goto free_bwc_rules;
-                               }
-
-                               all_done = false;
-                               pending_rules[i]++;
-                               bwc_rules[i] = list_is_last(&bwc_rules[i]->list_node,
-                                                           &bwc_matcher->rules[i]) ?
-                                              NULL : list_next_entry(bwc_rules[i], list_node);
-
-                               ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id,
-                                                        &pending_rules[i], false);
-                               if (unlikely(ret))
-                                       goto free_bwc_rules;
-                       }
-               }
-       } while (!all_done);
-
-       /* drain all the bwc queues */
-       for (i = 0; i < bwc_queues; i++) {
-               if (pending_rules[i]) {
-                       u16 queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
-
-                       mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]);
-                       ret = hws_bwc_queue_poll(ctx, queue_id,
-                                                &pending_rules[i], true);
-                       if (unlikely(ret))
-                               goto free_bwc_rules;
-               }
-       }
-
-free_bwc_rules:
-       kfree(bwc_rules);
-free_pending_rules:
-       kfree(pending_rules);
-
-       return ret;
-}
-
-static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
-       return hws_bwc_matcher_move_all_simple(bwc_matcher);
-}
-
-static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
-       struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
-       struct mlx5hws_matcher_attr matcher_attr = {0};
-       struct mlx5hws_matcher *old_matcher;
-       struct mlx5hws_matcher *new_matcher;
-       int ret;
-
-       hws_bwc_matcher_init_attr(&matcher_attr,
-                                 bwc_matcher->priority,
-                                 bwc_matcher->size_log);
-
-       old_matcher = bwc_matcher->matcher;
-       new_matcher = mlx5hws_matcher_create(old_matcher->tbl,
-                                            &bwc_matcher->mt, 1,
-                                            bwc_matcher->at,
-                                            bwc_matcher->num_of_at,
-                                            &matcher_attr);
-       if (!new_matcher) {
-               mlx5hws_err(ctx, "Rehash error: matcher creation failed\n");
-               return -ENOMEM;
-       }
-
-       ret = mlx5hws_matcher_resize_set_target(old_matcher, new_matcher);
-       if (ret) {
-               mlx5hws_err(ctx, "Rehash error: failed setting resize target\n");
-               return ret;
-       }
-
-       ret = hws_bwc_matcher_move_all(bwc_matcher);
-       if (ret) {
-               mlx5hws_err(ctx, "Rehash error: moving rules failed\n");
-               return -ENOMEM;
-       }
-
-       bwc_matcher->matcher = new_matcher;
-       mlx5hws_matcher_destroy(old_matcher);
-
-       return 0;
-}
-
-static int
-hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
-       int ret;
-
-       /* If the current matcher size is already at its max size, we can't
-        * do the rehash. Skip it and try adding the rule again - perhaps
-        * there was some change.
-        */
-       if (hws_bwc_matcher_size_maxed_out(bwc_matcher))
-               return 0;
-
-       /* It is possible that other rule has already performed rehash.
-        * Need to check again if we really need rehash.
-        * If the reason for rehash was size, but not any more - skip rehash.
-        */
-       if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, bwc_matcher->num_of_rules))
-               return 0;
-
-       /* Now we're done all the checking - do the rehash:
-        *  - extend match RTC size
-        *  - create new matcher
-        *  - move all the rules to the new matcher
-        *  - destroy the old matcher
-        */
-
-       ret = hws_bwc_matcher_extend_size(bwc_matcher);
-       if (ret)
-               return ret;
-
-       return hws_bwc_matcher_move(bwc_matcher);
-}
-
-static int
-hws_bwc_matcher_rehash_at(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
-       /* Rehash by action template doesn't require any additional checking.
-        * The bwc_matcher already contains the new action template.
-        * Just do the usual rehash:
-        *  - create new matcher
-        *  - move all the rules to the new matcher
-        *  - destroy the old matcher
-        */
-       return hws_bwc_matcher_move(bwc_matcher);
-}
-
-int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
-                                  u32 *match_param,
-                                  struct mlx5hws_rule_action rule_actions[],
-                                  u32 flow_source,
-                                  u16 bwc_queue_idx)
-{
-       struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
-       struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
-       struct mlx5hws_rule_attr rule_attr;
-       struct mutex *queue_lock; /* Protect the queue */
-       u32 num_of_rules;
-       int ret = 0;
-       int at_idx;
-
-       mlx5hws_bwc_rule_fill_attr(bwc_matcher, bwc_queue_idx, flow_source, &rule_attr);
-
-       queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
-
-       mutex_lock(queue_lock);
-
-       /* check if rehash needed due to missing action template */
-       at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
-       if (unlikely(at_idx < 0)) {
-               /* we need to extend BWC matcher action templates array */
-               mutex_unlock(queue_lock);
-               hws_bwc_lock_all_queues(ctx);
-
-               ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
-               if (unlikely(ret)) {
-                       hws_bwc_unlock_all_queues(ctx);
-                       return ret;
-               }
-
-               /* action templates array was extended, we need the last idx */
-               at_idx = bwc_matcher->num_of_at - 1;
-
-               ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
-                                               bwc_matcher->at[at_idx]);
-               if (unlikely(ret)) {
-                       /* Action template attach failed, possibly due to
-                        * requiring more action STEs.
-                        * Need to attempt creating new matcher with all
-                        * the action templates, including the new one.
-                        */
-                       ret = hws_bwc_matcher_rehash_at(bwc_matcher);
-                       if (unlikely(ret)) {
-                               mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
-                               bwc_matcher->at[at_idx] = NULL;
-                               bwc_matcher->num_of_at--;
-
-                               hws_bwc_unlock_all_queues(ctx);
-
-                               mlx5hws_err(ctx,
-                                           "BWC rule insertion: rehash AT failed (%d)\n", ret);
-                               return ret;
-                       }
-               }
-
-               hws_bwc_unlock_all_queues(ctx);
-               mutex_lock(queue_lock);
-       }
-
-       /* check if number of rules require rehash */
-       num_of_rules = bwc_matcher->num_of_rules;
-
-       if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
-               mutex_unlock(queue_lock);
-
-               hws_bwc_lock_all_queues(ctx);
-               ret = hws_bwc_matcher_rehash_size(bwc_matcher);
-               hws_bwc_unlock_all_queues(ctx);
-
-               if (ret) {
-                       mlx5hws_err(ctx, "BWC rule insertion: rehash size [%d -> %d] failed (%d)\n",
-                                   bwc_matcher->size_log - MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
-                                   bwc_matcher->size_log,
-                                   ret);
-                       return ret;
-               }
-
-               mutex_lock(queue_lock);
-       }
-
-       ret = hws_bwc_rule_create_sync(bwc_rule,
-                                      match_param,
-                                      at_idx,
-                                      rule_actions,
-                                      &rule_attr);
-       if (likely(!ret)) {
-               hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
-               mutex_unlock(queue_lock);
-               return 0; /* rule inserted successfully */
-       }
-
-       /* At this point the rule wasn't added.
-        * It could be because there was collision, or some other problem.
-        * If we don't dive deeper than API, the only thing we know is that
-        * the status of completion is RTE_FLOW_OP_ERROR.
-        * Try rehash by size and insert rule again - last chance.
-        */
-
-       mutex_unlock(queue_lock);
-
-       hws_bwc_lock_all_queues(ctx);
-       ret = hws_bwc_matcher_rehash_size(bwc_matcher);
-       hws_bwc_unlock_all_queues(ctx);
-
-       if (ret) {
-               mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret);
-               return ret;
-       }
-
-       /* Rehash done, but we still have that pesky rule to add */
-       mutex_lock(queue_lock);
-
-       ret = hws_bwc_rule_create_sync(bwc_rule,
-                                      match_param,
-                                      at_idx,
-                                      rule_actions,
-                                      &rule_attr);
-
-       if (unlikely(ret)) {
-               mutex_unlock(queue_lock);
-               mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret);
-               return ret;
-       }
-
-       hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
-       mutex_unlock(queue_lock);
-
-       return 0;
-}
-
-struct mlx5hws_bwc_rule *
-mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
-                       struct mlx5hws_match_parameters *params,
-                       u32 flow_source,
-                       struct mlx5hws_rule_action rule_actions[])
-{
-       struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
-       struct mlx5hws_bwc_rule *bwc_rule;
-       u16 bwc_queue_idx;
-       int ret;
-
-       if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
-               mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
-               return NULL;
-       }
-
-       bwc_rule = mlx5hws_bwc_rule_alloc(bwc_matcher);
-       if (unlikely(!bwc_rule))
-               return NULL;
-
-       bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);
-
-       ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
-                                            params->match_buf,
-                                            rule_actions,
-                                            flow_source,
-                                            bwc_queue_idx);
-       if (unlikely(ret)) {
-               mlx5hws_bwc_rule_free(bwc_rule);
-               return NULL;
-       }
-
-       return bwc_rule;
-}
-
-static int
-hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
-                          struct mlx5hws_rule_action rule_actions[])
-{
-       struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
-       struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
-       struct mlx5hws_rule_attr rule_attr;
-       struct mutex *queue_lock; /* Protect the queue */
-       int at_idx, ret;
-       u16 idx;
-
-       idx = bwc_rule->bwc_queue_idx;
-
-       mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &rule_attr);
-       queue_lock = hws_bwc_get_queue_lock(ctx, idx);
-
-       mutex_lock(queue_lock);
-
-       /* check if rehash needed due to missing action template */
-       at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
-       if (unlikely(at_idx < 0)) {
-               /* we need to extend BWC matcher action templates array */
-               mutex_unlock(queue_lock);
-               hws_bwc_lock_all_queues(ctx);
-
-               /* check again - perhaps other thread already did extend_at */
-               at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
-               if (likely(at_idx < 0)) {
-                       ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
-                       if (unlikely(ret)) {
-                               hws_bwc_unlock_all_queues(ctx);
-                               mlx5hws_err(ctx, "BWC rule update: failed extending AT (%d)", ret);
-                               return -EINVAL;
-                       }
-
-                       /* action templates array was extended, we need the last idx */
-                       at_idx = bwc_matcher->num_of_at - 1;
-
-                       ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
-                                                       bwc_matcher->at[at_idx]);
-                       if (unlikely(ret)) {
-                               /* Action template attach failed, possibly due to
-                                * requiring more action STEs.
-                                * Need to attempt creating new matcher with all
-                                * the action templates, including the new one.
-                                */
-                               ret = hws_bwc_matcher_rehash_at(bwc_matcher);
-                               if (unlikely(ret)) {
-                                       mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
-                                       bwc_matcher->at[at_idx] = NULL;
-                                       bwc_matcher->num_of_at--;
-
-                                       hws_bwc_unlock_all_queues(ctx);
-
-                                       mlx5hws_err(ctx,
-                                                   "BWC rule update: rehash AT failed (%d)\n",
-                                                   ret);
-                                       return ret;
-                               }
-                       }
-               }
-
-               hws_bwc_unlock_all_queues(ctx);
-               mutex_lock(queue_lock);
-       }
-
-       ret = hws_bwc_rule_update_sync(bwc_rule,
-                                      at_idx,
-                                      rule_actions,
-                                      &rule_attr);
-       mutex_unlock(queue_lock);
-
-       if (unlikely(ret))
-               mlx5hws_err(ctx, "BWC rule: update failed (%d)\n", ret);
-
-       return ret;
-}
-
-int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
-                                  struct mlx5hws_rule_action rule_actions[])
-{
-       struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
-       struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
-
-       if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
-               mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
-               return -EINVAL;
-       }
-
-       return hws_bwc_rule_action_update(bwc_rule, rule_actions);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h
deleted file mode 100644 (file)
index 4fe8c32..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#ifndef MLX5HWS_BWC_H_
-#define MLX5HWS_BWC_H_
-
-#define MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG 1
-#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1
-#define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70
-#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32
-#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 255
-
-#define MLX5HWS_BWC_MAX_ACTS 16
-
-struct mlx5hws_bwc_matcher {
-       struct mlx5hws_matcher *matcher;
-       struct mlx5hws_match_template *mt;
-       struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM];
-       u8 num_of_at;
-       u16 priority;
-       u8 size_log;
-       u32 num_of_rules; /* atomically accessed */
-       struct list_head *rules;
-};
-
-struct mlx5hws_bwc_rule {
-       struct mlx5hws_bwc_matcher *bwc_matcher;
-       struct mlx5hws_rule *rule;
-       u16 bwc_queue_idx;
-       struct list_head list_node;
-};
-
-int
-mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
-                                 struct mlx5hws_table *table,
-                                 u32 priority,
-                                 u8 match_criteria_enable,
-                                 struct mlx5hws_match_parameters *mask,
-                                 enum mlx5hws_action_type action_types[]);
-
-int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher);
-
-struct mlx5hws_bwc_rule *mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher);
-
-void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule);
-
-int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
-                                  u32 *match_param,
-                                  struct mlx5hws_rule_action rule_actions[],
-                                  u32 flow_source,
-                                  u16 bwc_queue_idx);
-
-int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule);
-
-void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
-                               u16 bwc_queue_idx,
-                               u32 flow_source,
-                               struct mlx5hws_rule_attr *rule_attr);
-
-static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
-{
-       /* Besides the control queue, half of the queues are
-        * reguler HWS queues, and the other half are BWC queues.
-        */
-       return (ctx->queues - 1) / 2;
-}
-
-static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
-{
-       return idx + mlx5hws_bwc_queues(ctx);
-}
-
-#endif /* MLX5HWS_BWC_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
deleted file mode 100644 (file)
index 601fad5..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#include "mlx5hws_internal.h"
-
-bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
-                                        u8 match_criteria_enable,
-                                        struct mlx5hws_match_parameters *mask)
-{
-       struct mlx5hws_definer match_layout = {0};
-       struct mlx5hws_match_template *mt;
-       bool is_complex = false;
-       int ret;
-
-       if (!match_criteria_enable)
-               return false; /* empty matcher */
-
-       mt = mlx5hws_match_template_create(ctx,
-                                          mask->match_buf,
-                                          mask->match_sz,
-                                          match_criteria_enable);
-       if (!mt) {
-               mlx5hws_err(ctx, "BWC: failed creating match template\n");
-               return false;
-       }
-
-       ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
-       if (ret) {
-               /* The only case that we're interested in is E2BIG,
-                * which means that the match parameters need to be
-                * split into complex martcher.
-                * For all other cases (good or bad) - just return true
-                * and let the usual match creation path handle it,
-                * both for good and bad flows.
-                */
-               if (ret == -E2BIG) {
-                       is_complex = true;
-                       mlx5hws_dbg(ctx, "Matcher definer layout: need complex matcher\n");
-               } else {
-                       mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
-               }
-       }
-
-       mlx5hws_match_template_destroy(mt);
-
-       return is_complex;
-}
-
-int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
-                                      struct mlx5hws_table *table,
-                                      u32 priority,
-                                      u8 match_criteria_enable,
-                                      struct mlx5hws_match_parameters *mask)
-{
-       mlx5hws_err(table->ctx, "Complex matcher is not supported yet\n");
-       return -EOPNOTSUPP;
-}
-
-void
-mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
-       /* nothing to do here */
-}
-
-int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
-                                   struct mlx5hws_match_parameters *params,
-                                   u32 flow_source,
-                                   struct mlx5hws_rule_action rule_actions[],
-                                   u16 bwc_queue_idx)
-{
-       mlx5hws_err(bwc_rule->bwc_matcher->matcher->tbl->ctx,
-                   "Complex rule is not supported yet\n");
-       return -EOPNOTSUPP;
-}
-
-int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule)
-{
-       return 0;
-}
-
-int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
-       mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
-                   "Moving complex rule is not supported yet\n");
-       return -EOPNOTSUPP;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h
deleted file mode 100644 (file)
index 068ee81..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#ifndef MLX5HWS_BWC_COMPLEX_H_
-#define MLX5HWS_BWC_COMPLEX_H_
-
-bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
-                                        u8 match_criteria_enable,
-                                        struct mlx5hws_match_parameters *mask);
-
-int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
-                                      struct mlx5hws_table *table,
-                                      u32 priority,
-                                      u8 match_criteria_enable,
-                                      struct mlx5hws_match_parameters *mask);
-
-void mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
-
-int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
-
-int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
-                                   struct mlx5hws_match_parameters *params,
-                                   u32 flow_source,
-                                   struct mlx5hws_rule_action rule_actions[],
-                                   u16 bwc_queue_idx);
-
-int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule);
-
-#endif /* MLX5HWS_BWC_COMPLEX_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c
deleted file mode 100644 (file)
index 2c7b141..0000000
+++ /dev/null
@@ -1,1300 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#include "mlx5hws_internal.h"
-
-static enum mlx5_ifc_flow_destination_type
-hws_cmd_dest_type_to_ifc_dest_type(enum mlx5_flow_destination_type type)
-{
-       switch (type) {
-       case MLX5_FLOW_DESTINATION_TYPE_VPORT:
-               return MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
-       case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
-               return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-       case MLX5_FLOW_DESTINATION_TYPE_TIR:
-               return MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
-       case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
-               return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
-       case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
-               return MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
-       case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE:
-               return MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE;
-       case MLX5_FLOW_DESTINATION_TYPE_NONE:
-       case MLX5_FLOW_DESTINATION_TYPE_PORT:
-       case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
-       case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
-       case MLX5_FLOW_DESTINATION_TYPE_RANGE:
-       default:
-               pr_warn("HWS: unknown flow dest type %d\n", type);
-               return 0;
-       }
-};
-
-static int hws_cmd_general_obj_destroy(struct mlx5_core_dev *mdev,
-                                      u32 object_type,
-                                      u32 object_id)
-{
-       u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
-       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
-
-       MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
-       MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, object_type);
-       MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, object_id);
-
-       return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev,
-                                 struct mlx5hws_cmd_ft_create_attr *ft_attr,
-                                 u32 *table_id)
-{
-       u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
-       u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
-       void *ft_ctx;
-       int ret;
-
-       MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
-       MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type);
-
-       ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
-       MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
-       MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);
-       MLX5_SET(flow_table_context, ft_ctx, reformat_en, ft_attr->reformat_en);
-       MLX5_SET(flow_table_context, ft_ctx, decap_en, ft_attr->decap_en);
-
-       ret = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
-       if (ret)
-               return ret;
-
-       *table_id = MLX5_GET(create_flow_table_out, out, table_id);
-
-       return 0;
-}
-
-int mlx5hws_cmd_flow_table_modify(struct mlx5_core_dev *mdev,
-                                 struct mlx5hws_cmd_ft_modify_attr *ft_attr,
-                                 u32 table_id)
-{
-       u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
-       void *ft_ctx;
-
-       MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE);
-       MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type);
-       MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs);
-       MLX5_SET(modify_flow_table_in, in, table_id, table_id);
-
-       ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context);
-
-       MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action);
-       MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id);
-       MLX5_SET(flow_table_context, ft_ctx, hws.rtc_id_0, ft_attr->rtc_id_0);
-       MLX5_SET(flow_table_context, ft_ctx, hws.rtc_id_1, ft_attr->rtc_id_1);
-
-       return mlx5_cmd_exec_in(mdev, modify_flow_table, in);
-}
-
-int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev,
-                                u32 table_id,
-                                struct mlx5hws_cmd_ft_query_attr *ft_attr,
-                                u64 *icm_addr_0, u64 *icm_addr_1)
-{
-       u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {0};
-       u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {0};
-       void *ft_ctx;
-       int ret;
-
-       MLX5_SET(query_flow_table_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_TABLE);
-       MLX5_SET(query_flow_table_in, in, table_type, ft_attr->type);
-       MLX5_SET(query_flow_table_in, in, table_id, table_id);
-
-       ret = mlx5_cmd_exec_inout(mdev, query_flow_table, in, out);
-       if (ret)
-               return ret;
-
-       ft_ctx = MLX5_ADDR_OF(query_flow_table_out, out, flow_table_context);
-       *icm_addr_0 = MLX5_GET64(flow_table_context, ft_ctx, sws.sw_owner_icm_root_0);
-       *icm_addr_1 = MLX5_GET64(flow_table_context, ft_ctx, sws.sw_owner_icm_root_1);
-
-       return ret;
-}
-
-int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
-                                  u8 fw_ft_type, u32 table_id)
-{
-       u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
-
-       MLX5_SET(destroy_flow_table_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
-       MLX5_SET(destroy_flow_table_in, in, table_type, fw_ft_type);
-       MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
-
-       return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
-}
-
-void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
-                                         u32 table_id)
-{
-       hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_FT_ALIAS, table_id);
-}
-
-static int hws_cmd_flow_group_create(struct mlx5_core_dev *mdev,
-                                    struct mlx5hws_cmd_fg_attr *fg_attr,
-                                    u32 *group_id)
-{
-       u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
-       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-       u32 *in;
-       int ret;
-
-       in = kvzalloc(inlen, GFP_KERNEL);
-       if (!in)
-               return -ENOMEM;
-
-       MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
-       MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type);
-       MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id);
-
-       ret = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
-       if (ret)
-               goto out;
-
-       *group_id = MLX5_GET(create_flow_group_out, out, group_id);
-
-out:
-       kvfree(in);
-       return ret;
-}
-
-static int hws_cmd_flow_group_destroy(struct mlx5_core_dev *mdev,
-                                     u32 ft_id, u32 fg_id, u8 ft_type)
-{
-       u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
-
-       MLX5_SET(destroy_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
-       MLX5_SET(destroy_flow_group_in, in, table_type, ft_type);
-       MLX5_SET(destroy_flow_group_in, in, table_id, ft_id);
-       MLX5_SET(destroy_flow_group_in, in, group_id, fg_id);
-
-       return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
-}
-
-int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev,
-                       u32 table_type,
-                       u32 table_id,
-                       u32 group_id,
-                       struct mlx5hws_cmd_set_fte_attr *fte_attr)
-{
-       u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
-       void *in_flow_context;
-       u32 dest_entry_sz;
-       u32 total_dest_sz;
-       u32 action_flags;
-       u8 *in_dests;
-       u32 inlen;
-       u32 *in;
-       int ret;
-       u32 i;
-
-       dest_entry_sz = fte_attr->extended_dest ?
-                       MLX5_ST_SZ_BYTES(extended_dest_format) :
-                       MLX5_ST_SZ_BYTES(dest_format);
-       total_dest_sz = dest_entry_sz * fte_attr->dests_num;
-       inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
-       in = kzalloc(inlen, GFP_KERNEL);
-       if (!in)
-               return -ENOMEM;
-
-       MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
-       MLX5_SET(set_fte_in, in, table_type, table_type);
-       MLX5_SET(set_fte_in, in, table_id, table_id);
-
-       in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
-       MLX5_SET(flow_context, in_flow_context, group_id, group_id);
-       MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
-       MLX5_SET(flow_context, in_flow_context, extended_destination, fte_attr->extended_dest);
-       MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
-
-       action_flags = fte_attr->action_flags;
-       MLX5_SET(flow_context, in_flow_context, action, action_flags);
-
-       if (action_flags & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
-               MLX5_SET(flow_context, in_flow_context,
-                        packet_reformat_id, fte_attr->packet_reformat_id);
-       }
-
-       if (action_flags & (MLX5_FLOW_CONTEXT_ACTION_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_ENCRYPT)) {
-               MLX5_SET(flow_context, in_flow_context,
-                        encrypt_decrypt_type, fte_attr->encrypt_decrypt_type);
-               MLX5_SET(flow_context, in_flow_context,
-                        encrypt_decrypt_obj_id, fte_attr->encrypt_decrypt_obj_id);
-       }
-
-       if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-               in_dests = (u8 *)MLX5_ADDR_OF(flow_context, in_flow_context, destination);
-
-               for (i = 0; i < fte_attr->dests_num; i++) {
-                       struct mlx5hws_cmd_set_fte_dest *dest = &fte_attr->dests[i];
-                       enum mlx5_ifc_flow_destination_type ifc_dest_type =
-                               hws_cmd_dest_type_to_ifc_dest_type(dest->destination_type);
-
-                       switch (dest->destination_type) {
-                       case MLX5_FLOW_DESTINATION_TYPE_VPORT:
-                               if (dest->ext_flags & MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID) {
-                                       MLX5_SET(dest_format, in_dests,
-                                                destination_eswitch_owner_vhca_id_valid, 1);
-                                       MLX5_SET(dest_format, in_dests,
-                                                destination_eswitch_owner_vhca_id,
-                                                dest->esw_owner_vhca_id);
-                               }
-                               fallthrough;
-                       case MLX5_FLOW_DESTINATION_TYPE_TIR:
-                       case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
-                               MLX5_SET(dest_format, in_dests, destination_type, ifc_dest_type);
-                               MLX5_SET(dest_format, in_dests, destination_id,
-                                        dest->destination_id);
-                               if (dest->ext_flags & MLX5HWS_CMD_EXT_DEST_REFORMAT) {
-                                       MLX5_SET(dest_format, in_dests, packet_reformat, 1);
-                                       MLX5_SET(extended_dest_format, in_dests, packet_reformat_id,
-                                                dest->ext_reformat_id);
-                               }
-                               break;
-                       default:
-                               ret = -EOPNOTSUPP;
-                               goto out;
-                       }
-
-                       in_dests = in_dests + dest_entry_sz;
-               }
-               MLX5_SET(flow_context, in_flow_context, destination_list_size, fte_attr->dests_num);
-       }
-
-       ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
-       if (ret)
-               mlx5_core_err(mdev, "Failed creating FLOW_TABLE_ENTRY\n");
-
-out:
-       kfree(in);
-       return ret;
-}
-
-int mlx5hws_cmd_delete_fte(struct mlx5_core_dev *mdev,
-                          u32 table_type,
-                          u32 table_id)
-{
-       u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
-
-       MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
-       MLX5_SET(delete_fte_in, in, table_type, table_type);
-       MLX5_SET(delete_fte_in, in, table_id, table_id);
-
-       return mlx5_cmd_exec_in(mdev, delete_fte, in);
-}
-
-struct mlx5hws_cmd_forward_tbl *
-mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev,
-                              struct mlx5hws_cmd_ft_create_attr *ft_attr,
-                              struct mlx5hws_cmd_set_fte_attr *fte_attr)
-{
-       struct mlx5hws_cmd_fg_attr fg_attr = {0};
-       struct mlx5hws_cmd_forward_tbl *tbl;
-       int ret;
-
-       tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
-       if (!tbl)
-               return NULL;
-
-       ret = mlx5hws_cmd_flow_table_create(mdev, ft_attr, &tbl->ft_id);
-       if (ret) {
-               mlx5_core_err(mdev, "Failed to create FT\n");
-               goto free_tbl;
-       }
-
-       fg_attr.table_id = tbl->ft_id;
-       fg_attr.table_type = ft_attr->type;
-
-       ret = hws_cmd_flow_group_create(mdev, &fg_attr, &tbl->fg_id);
-       if (ret) {
-               mlx5_core_err(mdev, "Failed to create FG\n");
-               goto free_ft;
-       }
-
-       ret = mlx5hws_cmd_set_fte(mdev, ft_attr->type,
-                                 tbl->ft_id, tbl->fg_id, fte_attr);
-       if (ret) {
-               mlx5_core_err(mdev, "Failed to create FTE\n");
-               goto free_fg;
-       }
-
-       tbl->type = ft_attr->type;
-       return tbl;
-
-free_fg:
-       hws_cmd_flow_group_destroy(mdev, tbl->ft_id, tbl->fg_id, ft_attr->type);
-free_ft:
-       mlx5hws_cmd_flow_table_destroy(mdev, ft_attr->type, tbl->ft_id);
-free_tbl:
-       kfree(tbl);
-       return NULL;
-}
-
-void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev,
-                                    struct mlx5hws_cmd_forward_tbl *tbl)
-{
-       mlx5hws_cmd_delete_fte(mdev, tbl->type, tbl->ft_id);
-       hws_cmd_flow_group_destroy(mdev, tbl->ft_id, tbl->fg_id, tbl->type);
-       mlx5hws_cmd_flow_table_destroy(mdev, tbl->type, tbl->ft_id);
-       kfree(tbl);
-}
-
-void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
-                                          u32 fw_ft_type,
-                                          enum mlx5hws_table_type type,
-                                          struct mlx5hws_cmd_ft_modify_attr *ft_attr)
-{
-       u32 default_miss_tbl;
-
-       if (type != MLX5HWS_TABLE_TYPE_FDB)
-               return;
-
-       ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
-       ft_attr->type = fw_ft_type;
-       ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
-
-       default_miss_tbl = ctx->common_res[type].default_miss->ft_id;
-       if (!default_miss_tbl) {
-               pr_warn("HWS: no flow table ID for default miss\n");
-               return;
-       }
-
-       ft_attr->table_miss_id = default_miss_tbl;
-}
-
-int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
-                          struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
-                          u32 *rtc_id)
-{
-       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
-       u32 in[MLX5_ST_SZ_DW(create_rtc_in)] = {0};
-       void *attr;
-       int ret;
-
-       attr = MLX5_ADDR_OF(create_rtc_in, in, hdr);
-       MLX5_SET(general_obj_in_cmd_hdr,
-                attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
-       MLX5_SET(general_obj_in_cmd_hdr,
-                attr, obj_type, MLX5_OBJ_TYPE_RTC);
-
-       attr = MLX5_ADDR_OF(create_rtc_in, in, rtc);
-       MLX5_SET(rtc, attr, ste_format_0, rtc_attr->is_frst_jumbo ?
-                MLX5_IFC_RTC_STE_FORMAT_11DW :
-                MLX5_IFC_RTC_STE_FORMAT_8DW);
-
-       if (rtc_attr->is_scnd_range) {
-               MLX5_SET(rtc, attr, ste_format_1, MLX5_IFC_RTC_STE_FORMAT_RANGE);
-               MLX5_SET(rtc, attr, num_match_ste, 2);
-       }
-
-       MLX5_SET(rtc, attr, pd, rtc_attr->pd);
-       MLX5_SET(rtc, attr, update_method, rtc_attr->fw_gen_wqe);
-       MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode);
-       MLX5_SET(rtc, attr, access_index_mode, rtc_attr->access_index_mode);
-       MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
-       MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth);
-       MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size);
-       MLX5_SET(rtc, attr, table_type, rtc_attr->table_type);
-       MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
-       MLX5_SET(rtc, attr, match_definer_0, rtc_attr->match_definer_0);
-       MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1);
-       MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base);
-       MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base);
-       MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset);
-       MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id);
-       MLX5_SET(rtc, attr, reparse_mode, rtc_attr->reparse_mode);
-
-       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-       if (ret) {
-               mlx5_core_err(mdev, "Failed to create RTC\n");
-               goto out;
-       }
-
-       *rtc_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-out:
-       return ret;
-}
-
-void mlx5hws_cmd_rtc_destroy(struct mlx5_core_dev *mdev, u32 rtc_id)
-{
-       hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_RTC, rtc_id);
-}
-
-int mlx5hws_cmd_stc_create(struct mlx5_core_dev *mdev,
-                          struct mlx5hws_cmd_stc_create_attr *stc_attr,
-                          u32 *stc_id)
-{
-       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
-       u32 in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
-       void *attr;
-       int ret;
-
-       attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
-       MLX5_SET(general_obj_in_cmd_hdr,
-                attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
-       MLX5_SET(general_obj_in_cmd_hdr,
-                attr, obj_type, MLX5_OBJ_TYPE_STC);
-       MLX5_SET(general_obj_in_cmd_hdr,
-                attr, op_param.create.log_obj_range, stc_attr->log_obj_range);
-
-       attr = MLX5_ADDR_OF(create_stc_in, in, stc);
-       MLX5_SET(stc, attr, table_type, stc_attr->table_type);
-
-       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-       if (ret) {
-               mlx5_core_err(mdev, "Failed to create STC\n");
-               goto out;
-       }
-
-       *stc_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-out:
-       return ret;
-}
-
-void mlx5hws_cmd_stc_destroy(struct mlx5_core_dev *mdev, u32 stc_id)
-{
-       hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_STC, stc_id);
-}
-
-static int
-hws_cmd_stc_modify_set_stc_param(struct mlx5_core_dev *mdev,
-                                struct mlx5hws_cmd_stc_modify_attr *stc_attr,
-                                void *stc_param)
-{
-       switch (stc_attr->action_type) {
-       case MLX5_IFC_STC_ACTION_TYPE_COUNTER:
-               MLX5_SET(stc_ste_param_flow_counter, stc_param, flow_counter_id, stc_attr->id);
-               break;
-       case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR:
-               MLX5_SET(stc_ste_param_tir, stc_param, tirn, stc_attr->dest_tir_num);
-               break;
-       case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT:
-               MLX5_SET(stc_ste_param_table, stc_param, table_id, stc_attr->dest_table_id);
-               break;
-       case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST:
-               MLX5_SET(stc_ste_param_header_modify_list, stc_param,
-                        header_modify_pattern_id, stc_attr->modify_header.pattern_id);
-               MLX5_SET(stc_ste_param_header_modify_list, stc_param,
-                        header_modify_argument_id, stc_attr->modify_header.arg_id);
-               break;
-       case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE:
-               MLX5_SET(stc_ste_param_remove, stc_param, action_type,
-                        MLX5_MODIFICATION_TYPE_REMOVE);
-               MLX5_SET(stc_ste_param_remove, stc_param, decap,
-                        stc_attr->remove_header.decap);
-               MLX5_SET(stc_ste_param_remove, stc_param, remove_start_anchor,
-                        stc_attr->remove_header.start_anchor);
-               MLX5_SET(stc_ste_param_remove, stc_param, remove_end_anchor,
-                        stc_attr->remove_header.end_anchor);
-               break;
-       case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT:
-               MLX5_SET(stc_ste_param_insert, stc_param, action_type,
-                        MLX5_MODIFICATION_TYPE_INSERT);
-               MLX5_SET(stc_ste_param_insert, stc_param, encap,
-                        stc_attr->insert_header.encap);
-               MLX5_SET(stc_ste_param_insert, stc_param, inline_data,
-                        stc_attr->insert_header.is_inline);
-               MLX5_SET(stc_ste_param_insert, stc_param, insert_anchor,
-                        stc_attr->insert_header.insert_anchor);
-               /* HW gets the next 2 sizes in words */
-               MLX5_SET(stc_ste_param_insert, stc_param, insert_size,
-                        stc_attr->insert_header.header_size / W_SIZE);
-               MLX5_SET(stc_ste_param_insert, stc_param, insert_offset,
-                        stc_attr->insert_header.insert_offset / W_SIZE);
-               MLX5_SET(stc_ste_param_insert, stc_param, insert_argument,
-                        stc_attr->insert_header.arg_id);
-               break;
-       case MLX5_IFC_STC_ACTION_TYPE_COPY:
-       case MLX5_IFC_STC_ACTION_TYPE_SET:
-       case MLX5_IFC_STC_ACTION_TYPE_ADD:
-       case MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD:
-               *(__be64 *)stc_param = stc_attr->modify_action.data;
-               break;
-       case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
-       case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK:
-               MLX5_SET(stc_ste_param_vport, stc_param, vport_number,
-                        stc_attr->vport.vport_num);
-               MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id,
-                        stc_attr->vport.esw_owner_vhca_id);
-               MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id_valid,
-                        stc_attr->vport.eswitch_owner_vhca_id_valid);
-               break;
-       case MLX5_IFC_STC_ACTION_TYPE_DROP:
-       case MLX5_IFC_STC_ACTION_TYPE_NOP:
-       case MLX5_IFC_STC_ACTION_TYPE_TAG:
-       case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
-               break;
-       case MLX5_IFC_STC_ACTION_TYPE_ASO:
-               MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_object_id,
-                        stc_attr->aso.devx_obj_id);
-               MLX5_SET(stc_ste_param_execute_aso, stc_param, return_reg_id,
-                        stc_attr->aso.return_reg_id);
-               MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_type,
-                        stc_attr->aso.aso_type);
-               break;
-       case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
-               MLX5_SET(stc_ste_param_ste_table, stc_param, ste_obj_id,
-                        stc_attr->ste_table.ste_obj_id);
-               MLX5_SET(stc_ste_param_ste_table, stc_param, match_definer_id,
-                        stc_attr->ste_table.match_definer_id);
-               MLX5_SET(stc_ste_param_ste_table, stc_param, log_hash_size,
-                        stc_attr->ste_table.log_hash_size);
-               break;
-       case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS:
-               MLX5_SET(stc_ste_param_remove_words, stc_param, action_type,
-                        MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
-               MLX5_SET(stc_ste_param_remove_words, stc_param, remove_start_anchor,
-                        stc_attr->remove_words.start_anchor);
-               MLX5_SET(stc_ste_param_remove_words, stc_param,
-                        remove_size, stc_attr->remove_words.num_of_words);
-               break;
-       case MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_ENCRYPTION:
-               MLX5_SET(stc_ste_param_ipsec_encrypt, stc_param, ipsec_object_id,
-                        stc_attr->id);
-               break;
-       case MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_DECRYPTION:
-               MLX5_SET(stc_ste_param_ipsec_decrypt, stc_param, ipsec_object_id,
-                        stc_attr->id);
-               break;
-       case MLX5_IFC_STC_ACTION_TYPE_TRAILER:
-               MLX5_SET(stc_ste_param_trailer, stc_param, command,
-                        stc_attr->reformat_trailer.op);
-               MLX5_SET(stc_ste_param_trailer, stc_param, type,
-                        stc_attr->reformat_trailer.type);
-               MLX5_SET(stc_ste_param_trailer, stc_param, length,
-                        stc_attr->reformat_trailer.size);
-               break;
-       default:
-               mlx5_core_err(mdev, "Not supported type %d\n", stc_attr->action_type);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-int mlx5hws_cmd_stc_modify(struct mlx5_core_dev *mdev,
-                          u32 stc_id,
-                          struct mlx5hws_cmd_stc_modify_attr *stc_attr)
-{
-       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
-       u32 in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
-       void *stc_param;
-       void *attr;
-       int ret;
-
-       attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
-       MLX5_SET(general_obj_in_cmd_hdr,
-                attr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
-       MLX5_SET(general_obj_in_cmd_hdr,
-                attr, obj_type, MLX5_OBJ_TYPE_STC);
-       MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, stc_id);
-       MLX5_SET(general_obj_in_cmd_hdr, in,
-                op_param.query.obj_offset, stc_attr->stc_offset);
-
-       attr = MLX5_ADDR_OF(create_stc_in, in, stc);
-       MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset);
-       MLX5_SET(stc, attr, action_type, stc_attr->action_type);
-       MLX5_SET(stc, attr, reparse_mode, stc_attr->reparse_mode);
-       MLX5_SET64(stc, attr, modify_field_select,
-                  MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC);
-
-       /* Set destination TIRN, TAG, FT ID, STE ID */
-       stc_param = MLX5_ADDR_OF(stc, attr, stc_param);
-       ret = hws_cmd_stc_modify_set_stc_param(mdev, stc_attr, stc_param);
-       if (ret)
-               return ret;
-
-       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-       if (ret)
-               mlx5_core_err(mdev, "Failed to modify STC FW action_type %d\n",
-                             stc_attr->action_type);
-
-       return ret;
-}
-
-int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
-                          u16 log_obj_range,
-                          u32 pd,
-                          u32 *arg_id)
-{
-       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
-       u32 in[MLX5_ST_SZ_DW(create_arg_in)] = {0};
-       void *attr;
-       int ret;
-
-       attr = MLX5_ADDR_OF(create_arg_in, in, hdr);
-       MLX5_SET(general_obj_in_cmd_hdr,
-                attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
-       MLX5_SET(general_obj_in_cmd_hdr,
-                attr, obj_type, MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
-       MLX5_SET(general_obj_in_cmd_hdr,
-                attr, op_param.create.log_obj_range, log_obj_range);
-
-       attr = MLX5_ADDR_OF(create_arg_in, in, arg);
-       MLX5_SET(arg, attr, access_pd, pd);
-
-       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-       if (ret) {
-               mlx5_core_err(mdev, "Failed to create ARG\n");
-               goto out;
-       }
-
-       *arg_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-out:
-       return ret;
-}
-
-void mlx5hws_cmd_arg_destroy(struct mlx5_core_dev *mdev,
-                            u32 arg_id)
-{
-       hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT, arg_id);
-}
-
-int mlx5hws_cmd_header_modify_pattern_create(struct mlx5_core_dev *mdev,
-                                            u32 pattern_length,
-                                            u8 *actions,
-                                            u32 *ptrn_id)
-{
-       u32 in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0};
-       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
-       int num_of_actions;
-       u64 *pattern_data;
-       void *pattern;
-       void *attr;
-       int ret;
-       int i;
-
-       if (pattern_length > MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY) {
-               mlx5_core_err(mdev, "Pattern length %d exceeds limit %d\n",
-                             pattern_length, MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY);
-               return -EINVAL;
-       }
-
-       attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr);
-       MLX5_SET(general_obj_in_cmd_hdr,
-                attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
-       MLX5_SET(general_obj_in_cmd_hdr,
-                attr, obj_type, MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN);
-
-       pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern);
-       /* Pattern_length is in ddwords */
-       MLX5_SET(header_modify_pattern_in, pattern, pattern_length, pattern_length / (2 * DW_SIZE));
-
-       pattern_data = (u64 *)MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data);
-       memcpy(pattern_data, actions, pattern_length);
-
-       num_of_actions = pattern_length / MLX5HWS_MODIFY_ACTION_SIZE;
-       for (i = 0; i < num_of_actions; i++) {
-               int type;
-
-               type = MLX5_GET(set_action_in, &pattern_data[i], action_type);
-               if (type != MLX5_MODIFICATION_TYPE_COPY &&
-                   type != MLX5_MODIFICATION_TYPE_ADD_FIELD)
-                       /* Action typ-copy use all bytes for control */
-                       MLX5_SET(set_action_in, &pattern_data[i], data, 0);
-       }
-
-       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-       if (ret) {
-               mlx5_core_err(mdev, "Failed to create header_modify_pattern\n");
-               goto out;
-       }
-
-       *ptrn_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-out:
-       return ret;
-}
-
-void mlx5hws_cmd_header_modify_pattern_destroy(struct mlx5_core_dev *mdev,
-                                              u32 ptrn_id)
-{
-       hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN, ptrn_id);
-}
-
-int mlx5hws_cmd_ste_create(struct mlx5_core_dev *mdev,
-                          struct mlx5hws_cmd_ste_create_attr *ste_attr,
-                          u32 *ste_id)
-{
-       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
-       u32 in[MLX5_ST_SZ_DW(create_ste_in)] = {0};
-       void *attr;
-       int ret;
-
-       attr = MLX5_ADDR_OF(create_ste_in, in, hdr);
-       MLX5_SET(general_obj_in_cmd_hdr,
-                attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
-       MLX5_SET(general_obj_in_cmd_hdr,
-                attr, obj_type, MLX5_OBJ_TYPE_STE);
-       MLX5_SET(general_obj_in_cmd_hdr,
-                attr, op_param.create.log_obj_range, ste_attr->log_obj_range);
-
-       attr = MLX5_ADDR_OF(create_ste_in, in, ste);
-       MLX5_SET(ste, attr, table_type, ste_attr->table_type);
-
-       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-       if (ret) {
-               mlx5_core_err(mdev, "Failed to create STE\n");
-               goto out;
-       }
-
-       *ste_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-out:
-       return ret;
-}
-
-void mlx5hws_cmd_ste_destroy(struct mlx5_core_dev *mdev, u32 ste_id)
-{
-       hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_STE, ste_id);
-}
-
-int mlx5hws_cmd_definer_create(struct mlx5_core_dev *mdev,
-                              struct mlx5hws_cmd_definer_create_attr *def_attr,
-                              u32 *definer_id)
-{
-       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
-       u32 in[MLX5_ST_SZ_DW(create_definer_in)] = {0};
-       void *ptr;
-       int ret;
-
-       MLX5_SET(general_obj_in_cmd_hdr,
-                in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
-       MLX5_SET(general_obj_in_cmd_hdr,
-                in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER);
-
-       ptr = MLX5_ADDR_OF(create_definer_in, in, definer);
-       MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT);
-
-       MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]);
-       MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]);
-       MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]);
-       MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]);
-       MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]);
-       MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]);
-       MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]);
-       MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]);
-       MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]);
-
-       MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]);
-       MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]);
-       MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]);
-       MLX5_SET(definer, ptr, format_select_byte3, def_attr->byte_selector[3]);
-       MLX5_SET(definer, ptr, format_select_byte4, def_attr->byte_selector[4]);
-       MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]);
-       MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]);
-       MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]);
-
-       ptr = MLX5_ADDR_OF(definer, ptr, match_mask);
-       memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask));
-
-       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-       if (ret) {
-               mlx5_core_err(mdev, "Failed to create Definer\n");
-               goto out;
-       }
-
-       *definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-out:
-       return ret;
-}
-
-void mlx5hws_cmd_definer_destroy(struct mlx5_core_dev *mdev,
-                                u32 definer_id)
-{
-       hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_MATCH_DEFINER, definer_id);
-}
-
-int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
-                                      struct mlx5hws_cmd_packet_reformat_create_attr *attr,
-                                      u32 *reformat_id)
-{
-       u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0};
-       size_t insz, cmd_data_sz, cmd_total_sz;
-       void *prctx;
-       void *pdata;
-       void *in;
-       int ret;
-
-       cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
-       cmd_total_sz += MLX5_ST_SZ_BYTES(packet_reformat_context_in);
-       cmd_data_sz = MLX5_FLD_SZ_BYTES(packet_reformat_context_in, reformat_data);
-       insz = align(cmd_total_sz + attr->data_sz - cmd_data_sz, DW_SIZE);
-       in = kzalloc(insz, GFP_KERNEL);
-       if (!in)
-               return -ENOMEM;
-
-       MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
-                MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
-
-       prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
-                            packet_reformat_context);
-       pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
-
-       MLX5_SET(packet_reformat_context_in, prctx, reformat_type, attr->type);
-       MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, attr->reformat_param_0);
-       MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, attr->data_sz);
-       memcpy(pdata, attr->data, attr->data_sz);
-
-       ret = mlx5_cmd_exec(mdev, in, insz, out, sizeof(out));
-       if (ret) {
-               mlx5_core_err(mdev, "Failed to create packet reformat\n");
-               goto out;
-       }
-
-       *reformat_id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id);
-out:
-       kfree(in);
-       return ret;
-}
-
-int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev,
-                                       u32 reformat_id)
-{
-       u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_out)] = {0};
-       u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_in)] = {0};
-       int ret;
-
-       MLX5_SET(dealloc_packet_reformat_in, in, opcode,
-                MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
-       MLX5_SET(dealloc_packet_reformat_in, in,
-                packet_reformat_id, reformat_id);
-
-       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-       if (ret)
-               mlx5_core_err(mdev, "Failed to destroy packet_reformat\n");
-
-       return ret;
-}
-
-int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn)
-{
-       u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
-       u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
-       void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
-       int ret;
-
-       MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
-       MLX5_SET(modify_sq_in, in, sqn, sqn);
-       MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
-       MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
-
-       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-       if (ret)
-               mlx5_core_err(mdev, "Failed to modify SQ\n");
-
-       return ret;
-}
-
-int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
-                                       struct mlx5hws_cmd_allow_other_vhca_access_attr *attr)
-{
-       u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0};
-       u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0};
-       void *key;
-       int ret;
-
-       MLX5_SET(allow_other_vhca_access_in,
-                in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
-       MLX5_SET(allow_other_vhca_access_in,
-                in, object_type_to_be_accessed, attr->obj_type);
-       MLX5_SET(allow_other_vhca_access_in,
-                in, object_id_to_be_accessed, attr->obj_id);
-
-       key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
-       memcpy(key, attr->access_key, sizeof(attr->access_key));
-
-       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-       if (ret)
-               mlx5_core_err(mdev, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command\n");
-
-       return ret;
-}
-
-int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
-                                struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
-                                u32 *obj_id)
-{
-       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
-       u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0};
-       void *attr;
-       void *key;
-       int ret;
-
-       attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
-       MLX5_SET(general_obj_in_cmd_hdr,
-                attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
-       MLX5_SET(general_obj_in_cmd_hdr,
-                attr, obj_type, alias_attr->obj_type);
-       MLX5_SET(general_obj_in_cmd_hdr, attr, op_param.create.alias_object, 1);
-
-       attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
-       MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
-       MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);
-
-       key = MLX5_ADDR_OF(alias_context, attr, access_key);
-       memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));
-
-       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-       if (ret) {
-               mlx5_core_err(mdev, "Failed to create ALIAS OBJ\n");
-               goto out;
-       }
-
-       *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-out:
-       return ret;
-}
-
-int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
-                                 u16 obj_type,
-                                 u32 obj_id)
-{
-       return hws_cmd_general_obj_destroy(mdev, obj_type, obj_id);
-}
-
-int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev,
-                            struct mlx5hws_cmd_generate_wqe_attr *attr,
-                            struct mlx5_cqe64 *ret_cqe)
-{
-       u32 out[MLX5_ST_SZ_DW(generate_wqe_out)] = {0};
-       u32 in[MLX5_ST_SZ_DW(generate_wqe_in)] = {0};
-       u8 status;
-       void *ptr;
-       int ret;
-
-       MLX5_SET(generate_wqe_in, in, opcode, MLX5_CMD_OP_GENERATE_WQE);
-       MLX5_SET(generate_wqe_in, in, pdn, attr->pdn);
-
-       ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_ctrl);
-       memcpy(ptr, attr->wqe_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_ctrl));
-
-       ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_ctrl);
-       memcpy(ptr, attr->gta_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_ctrl));
-
-       ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_0);
-       memcpy(ptr, attr->gta_data_0, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_0));
-
-       if (attr->gta_data_1) {
-               ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_1);
-               memcpy(ptr, attr->gta_data_1, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_1));
-       }
-
-       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-       if (ret) {
-               mlx5_core_err(mdev, "Failed to write GTA WQE using FW\n");
-               return ret;
-       }
-
-       status = MLX5_GET(generate_wqe_out, out, status);
-       if (status) {
-               mlx5_core_err(mdev, "Invalid FW CQE status %d\n", status);
-               return -EINVAL;
-       }
-
-       ptr = MLX5_ADDR_OF(generate_wqe_out, out, cqe_data);
-       memcpy(ret_cqe, ptr, sizeof(*ret_cqe));
-
-       return ret;
-}
-
-int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
-                          struct mlx5hws_cmd_query_caps *caps)
-{
-       u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
-       u32 out_size;
-       u32 *out;
-       int ret;
-
-       out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
-       out = kzalloc(out_size, GFP_KERNEL);
-       if (!out)
-               return -ENOMEM;
-
-       MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
-       MLX5_SET(query_hca_cap_in, in, op_mod,
-                MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE | HCA_CAP_OPMOD_GET_CUR);
-
-       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
-       if (ret) {
-               mlx5_core_err(mdev, "Failed to query device caps\n");
-               goto out;
-       }
-
-       caps->wqe_based_update =
-               MLX5_GET(query_hca_cap_out, out,
-                        capability.cmd_hca_cap.wqe_based_flow_table_update_cap);
-
-       caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out,
-                                        capability.cmd_hca_cap.eswitch_manager);
-
-       caps->flex_protocols = MLX5_GET(query_hca_cap_out, out,
-                                       capability.cmd_hca_cap.flex_parser_protocols);
-
-       if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
-               caps->flex_parser_id_geneve_tlv_option_0 =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.cmd_hca_cap.flex_parser_id_geneve_tlv_option_0);
-
-       if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
-               caps->flex_parser_id_mpls_over_gre =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.cmd_hca_cap.flex_parser_id_outer_first_mpls_over_gre);
-
-       if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
-               caps->flex_parser_id_mpls_over_udp =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.cmd_hca_cap.flex_parser_id_outer_first_mpls_over_udp_label);
-
-       caps->log_header_modify_argument_granularity =
-               MLX5_GET(query_hca_cap_out, out,
-                        capability.cmd_hca_cap.log_header_modify_argument_granularity);
-
-       caps->log_header_modify_argument_granularity -=
-               MLX5_GET(query_hca_cap_out, out,
-                        capability.cmd_hca_cap.log_header_modify_argument_granularity_offset);
-
-       caps->log_header_modify_argument_max_alloc =
-               MLX5_GET(query_hca_cap_out, out,
-                        capability.cmd_hca_cap.log_header_modify_argument_max_alloc);
-
-       caps->definer_format_sup =
-               MLX5_GET64(query_hca_cap_out, out,
-                          capability.cmd_hca_cap.match_definer_format_supported);
-
-       caps->vhca_id = MLX5_GET(query_hca_cap_out, out,
-                                capability.cmd_hca_cap.vhca_id);
-
-       caps->sq_ts_format = MLX5_GET(query_hca_cap_out, out,
-                                     capability.cmd_hca_cap.sq_ts_format);
-
-       caps->ipsec_offload = MLX5_GET(query_hca_cap_out, out,
-                                      capability.cmd_hca_cap.ipsec_offload);
-
-       MLX5_SET(query_hca_cap_in, in, op_mod,
-                MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 | HCA_CAP_OPMOD_GET_CUR);
-
-       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
-       if (ret) {
-               mlx5_core_err(mdev, "Failed to query device caps 2\n");
-               goto out;
-       }
-
-       caps->full_dw_jumbo_support =
-               MLX5_GET(query_hca_cap_out, out,
-                        capability.cmd_hca_cap_2.format_select_dw_8_6_ext);
-
-       caps->format_select_gtpu_dw_0 =
-               MLX5_GET(query_hca_cap_out, out,
-                        capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_0);
-
-       caps->format_select_gtpu_dw_1 =
-               MLX5_GET(query_hca_cap_out, out,
-                        capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_1);
-
-       caps->format_select_gtpu_dw_2 =
-               MLX5_GET(query_hca_cap_out, out,
-                        capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_2);
-
-       caps->format_select_gtpu_ext_dw_0 =
-               MLX5_GET(query_hca_cap_out, out,
-                        capability.cmd_hca_cap_2.format_select_dw_gtpu_first_ext_dw_0);
-
-       caps->supp_type_gen_wqe =
-               MLX5_GET(query_hca_cap_out, out,
-                        capability.cmd_hca_cap_2.generate_wqe_type);
-
-       caps->flow_table_hash_type =
-               MLX5_GET(query_hca_cap_out, out,
-                        capability.cmd_hca_cap_2.flow_table_hash_type);
-
-       MLX5_SET(query_hca_cap_in, in, op_mod,
-                MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
-
-       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
-       if (ret) {
-               mlx5_core_err(mdev, "Failed to query flow table caps\n");
-               goto out;
-       }
-
-       caps->nic_ft.max_level =
-               MLX5_GET(query_hca_cap_out, out,
-                        capability.flow_table_nic_cap.flow_table_properties_nic_receive.max_ft_level);
-
-       caps->nic_ft.reparse =
-               MLX5_GET(query_hca_cap_out, out,
-                        capability.flow_table_nic_cap.flow_table_properties_nic_receive.reparse);
-
-       caps->nic_ft.ignore_flow_level_rtc_valid =
-               MLX5_GET(query_hca_cap_out, out,
-                        capability.flow_table_nic_cap.flow_table_properties_nic_receive.ignore_flow_level_rtc_valid);
-
-       caps->flex_parser_ok_bits_supp =
-               MLX5_GET(query_hca_cap_out, out,
-                        capability.flow_table_nic_cap.flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist);
-
-       if (caps->wqe_based_update) {
-               MLX5_SET(query_hca_cap_in, in, op_mod,
-                        MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
-
-               ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
-               if (ret) {
-                       mlx5_core_err(mdev, "Failed to query WQE based FT caps\n");
-                       goto out;
-               }
-
-               caps->rtc_reparse_mode =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.wqe_based_flow_table_cap.rtc_reparse_mode);
-
-               caps->ste_format =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.wqe_based_flow_table_cap.ste_format);
-
-               caps->rtc_index_mode =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.wqe_based_flow_table_cap.rtc_index_mode);
-
-               caps->rtc_log_depth_max =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.wqe_based_flow_table_cap.rtc_log_depth_max);
-
-               caps->ste_alloc_log_max =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.wqe_based_flow_table_cap.ste_alloc_log_max);
-
-               caps->ste_alloc_log_gran =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.wqe_based_flow_table_cap.ste_alloc_log_granularity);
-
-               caps->trivial_match_definer =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.wqe_based_flow_table_cap.trivial_match_definer);
-
-               caps->stc_alloc_log_max =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.wqe_based_flow_table_cap.stc_alloc_log_max);
-
-               caps->stc_alloc_log_gran =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.wqe_based_flow_table_cap.stc_alloc_log_granularity);
-
-               caps->rtc_hash_split_table =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.wqe_based_flow_table_cap.rtc_hash_split_table);
-
-               caps->rtc_linear_lookup_table =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.wqe_based_flow_table_cap.rtc_linear_lookup_table);
-
-               caps->access_index_mode =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.wqe_based_flow_table_cap.access_index_mode);
-
-               caps->linear_match_definer =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.wqe_based_flow_table_cap.linear_match_definer_reg_c3);
-
-               caps->rtc_max_hash_def_gen_wqe =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.wqe_based_flow_table_cap.rtc_max_num_hash_definer_gen_wqe);
-
-               caps->supp_ste_format_gen_wqe =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.wqe_based_flow_table_cap.ste_format_gen_wqe);
-
-               caps->fdb_tir_stc =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.wqe_based_flow_table_cap.fdb_jump_to_tir_stc);
-       }
-
-       if (caps->eswitch_manager) {
-               MLX5_SET(query_hca_cap_in, in, op_mod,
-                        MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
-
-               ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
-               if (ret) {
-                       mlx5_core_err(mdev, "Failed to query flow table esw caps\n");
-                       goto out;
-               }
-
-               caps->fdb_ft.max_level =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.flow_table_nic_cap.flow_table_properties_nic_receive.max_ft_level);
-
-               caps->fdb_ft.reparse =
-                       MLX5_GET(query_hca_cap_out, out,
-                                capability.flow_table_nic_cap.flow_table_properties_nic_receive.reparse);
-
-               MLX5_SET(query_hca_cap_in, in, op_mod,
-                        MLX5_SET_HCA_CAP_OP_MOD_ESW | HCA_CAP_OPMOD_GET_CUR);
-
-               ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
-               if (ret) {
-                       mlx5_core_err(mdev, "Failed to query eswitch capabilities\n");
-                       goto out;
-               }
-
-               if (MLX5_GET(query_hca_cap_out, out,
-                            capability.esw_cap.esw_manager_vport_number_valid))
-                       caps->eswitch_manager_vport_number =
-                               MLX5_GET(query_hca_cap_out, out,
-                                        capability.esw_cap.esw_manager_vport_number);
-
-               caps->merged_eswitch = MLX5_GET(query_hca_cap_out, out,
-                                               capability.esw_cap.merged_eswitch);
-       }
-
-       ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
-       if (ret) {
-               mlx5_core_err(mdev, "Failed to query device attributes\n");
-               goto out;
-       }
-
-       snprintf(caps->fw_ver, sizeof(caps->fw_ver), "%d.%d.%d",
-                fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
-
-       caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);
-
-out:
-       kfree(out);
-       return ret;
-}
-
-int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
-                          u16 vport_number, u16 *gvmi)
-{
-       bool ec_vf_func = other_function ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false;
-       u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
-       int out_size;
-       void *out;
-       int err;
-
-       out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
-       out = kzalloc(out_size, GFP_KERNEL);
-       if (!out)
-               return -ENOMEM;
-
-       MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
-       MLX5_SET(query_hca_cap_in, in, other_function, other_function);
-       MLX5_SET(query_hca_cap_in, in, function_id,
-                mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func));
-       MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
-       MLX5_SET(query_hca_cap_in, in, op_mod,
-                MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 | HCA_CAP_OPMOD_GET_CUR);
-
-       err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
-       if (err) {
-               kfree(out);
-               return err;
-       }
-
-       *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
-
-       kfree(out);
-
-       return 0;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h
deleted file mode 100644 (file)
index 2fbcf4f..0000000
+++ /dev/null
@@ -1,361 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#ifndef MLX5HWS_CMD_H_
-#define MLX5HWS_CMD_H_
-
#define WIRE_PORT 0xFFFF /* presumably the uplink/wire vport number - see callers */

#define ACCESS_KEY_LEN 32 /* length of the cross-vhca alias object access key */

/* Flags qualifying an extended destination of a flow table entry. */
enum mlx5hws_cmd_ext_dest_flags {
	MLX5HWS_CMD_EXT_DEST_REFORMAT = 1 << 0,
	MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID = 1 << 1,
};

/* A single destination of a flow table entry (FTE). */
struct mlx5hws_cmd_set_fte_dest {
	u8 destination_type;
	u32 destination_id;
	enum mlx5hws_cmd_ext_dest_flags ext_flags;
	u32 ext_reformat_id;   /* meaningful when ..._EXT_DEST_REFORMAT is set */
	u16 esw_owner_vhca_id; /* meaningful when ..._ESW_OWNER_VHCA_ID is set */
};

/* Attributes describing a flow table entry to be created. */
struct mlx5hws_cmd_set_fte_attr {
	u32 action_flags;
	bool ignore_flow_level;
	u8 flow_source;
	u8 extended_dest;
	u8 encrypt_decrypt_type;
	u32 encrypt_decrypt_obj_id;
	u32 packet_reformat_id;
	u32 dests_num; /* number of valid entries in @dests */
	struct mlx5hws_cmd_set_fte_dest *dests;
};
-
/* Attributes for creating a FW flow table. */
struct mlx5hws_cmd_ft_create_attr {
	u8 type;
	u8 level;
	bool rtc_valid;   /* table has RTC ids attached - TODO confirm semantics */
	bool decap_en;
	bool reformat_en;
};

/* Attributes for modifying a FW flow table.
 * @modify_fs: bitmask selecting which of the fields below to apply.
 */
struct mlx5hws_cmd_ft_modify_attr {
	u8 type;
	u32 rtc_id_0;
	u32 rtc_id_1;
	u32 table_miss_id;
	u8 table_miss_action;
	u64 modify_fs;
};

/* Attributes for querying a FW flow table. */
struct mlx5hws_cmd_ft_query_attr {
	u8 type;
};

/* Identification of a flow group within a table. */
struct mlx5hws_cmd_fg_attr {
	u32 table_id;
	u32 table_type;
};

/* A FW flow table plus its flow group, used as a forwarding target.
 * Shared between users via @refcount.
 */
struct mlx5hws_cmd_forward_tbl {
	u8 type;
	u32 ft_id;
	u32 fg_id;
	u32 refcount;
};
-
/* Attributes for creating an RTC (rule table context). */
struct mlx5hws_cmd_rtc_create_attr {
	u32 pd;
	u32 stc_base;
	u32 ste_base;
	u32 ste_offset;
	u32 miss_ft_id;  /* flow table to go to on miss */
	bool fw_gen_wqe; /* use FW-generated WQEs */
	u8 update_index_mode;
	u8 access_index_mode;
	u8 num_hash_definer;
	u8 log_depth;
	u8 log_size;
	u8 table_type;
	u8 match_definer_0;
	u8 match_definer_1;
	u8 reparse_mode;
	bool is_frst_jumbo; /* first matcher STE is jumbo - TODO confirm */
	bool is_scnd_range; /* second matcher STE does range match - TODO confirm */
};

/* Attributes for creating a cross-vhca alias object,
 * authenticated by @access_key.
 */
struct mlx5hws_cmd_alias_obj_create_attr {
	u32 obj_id;
	u16 vhca_id;
	u16 obj_type;
	u8 access_key[ACCESS_KEY_LEN];
};

/* Attributes for creating a range of STC objects. */
struct mlx5hws_cmd_stc_create_attr {
	u8 log_obj_range; /* log2 of the number of objects to allocate */
	u8 table_type;
};
-
/* Attributes for modifying an STC (steering context).
 * The union member that is read is selected by @action_type.
 */
struct mlx5hws_cmd_stc_modify_attr {
	u32 stc_offset;
	u8 action_offset;
	u8 reparse_mode;
	enum mlx5_ifc_stc_action_type action_type;
	union {
		u32 id; /* TIRN, TAG, FT ID, STE ID, CRYPTO */
		struct {
			u8 decap;
			u16 start_anchor;
			u16 end_anchor;
		} remove_header;
		struct {
			u32 arg_id;
			u32 pattern_id;
		} modify_header;
		struct {
			__be64 data;
		} modify_action;
		struct {
			u32 arg_id;
			u32 header_size;
			u8 is_inline;
			u8 encap;
			u16 insert_anchor;
			u16 insert_offset;
		} insert_header;
		struct {
			u8 aso_type;
			u32 devx_obj_id;
			u8 return_reg_id;
		} aso;
		struct {
			u16 vport_num;
			u16 esw_owner_vhca_id;
			u8 eswitch_owner_vhca_id_valid;
		} vport;
		struct {
			struct mlx5hws_pool_chunk ste;
			struct mlx5hws_pool *ste_pool;
			u32 ste_obj_id; /* Internal */
			u32 match_definer_id;
			u8 log_hash_size;
			bool ignore_tx;
		} ste_table;
		struct {
			u16 start_anchor;
			u16 num_of_words;
		} remove_words;
		struct {
			u8 type;
			u8 op;
			u8 size;
		} reformat_trailer;

		u32 dest_table_id;
		u32 dest_tir_num;
	};
};
-
/* Attributes for creating a range of STE objects. */
struct mlx5hws_cmd_ste_create_attr {
	u8 log_obj_range; /* log2 of the number of objects to allocate */
	u8 table_type;
};

/* Selectors and match mask used to create a match definer. */
struct mlx5hws_cmd_definer_create_attr {
	u8 *dw_selector;
	u8 *byte_selector;
	u8 *match_mask;
};

/* Grant another vhca access to a local object, keyed by @access_key. */
struct mlx5hws_cmd_allow_other_vhca_access_attr {
	u16 obj_type;
	u32 obj_id;
	u8 access_key[ACCESS_KEY_LEN];
};

/* Attributes for creating a packet reformat context. */
struct mlx5hws_cmd_packet_reformat_create_attr {
	u8 type;
	size_t data_sz; /* length of @data in bytes */
	void *data;
	u8 reformat_param_0;
};

/* Flow table capabilities queried per table type (NIC / FDB). */
struct mlx5hws_cmd_query_ft_caps {
	u8 max_level;
	u8 reparse;
	u8 ignore_flow_level_rtc_valid;
};

/* Raw WQE segments passed to the GENERATE_WQE command. */
struct mlx5hws_cmd_generate_wqe_attr {
	u8 *wqe_ctrl;
	u8 *gta_ctrl;
	u8 *gta_data_0;
	u8 *gta_data_1;
	u32 pdn;
};
-
/* Device/FW capabilities relevant to HWS, filled by
 * mlx5hws_cmd_query_caps(). Field names mirror the corresponding
 * HCA capability fields.
 */
struct mlx5hws_cmd_query_caps {
	u32 flex_protocols;
	u8 wqe_based_update; /* WQE-based rule insertion - required for HWS */
	u8 rtc_reparse_mode;
	u16 ste_format;
	u8 rtc_index_mode;
	u8 ste_alloc_log_max;
	u8 ste_alloc_log_gran;
	u8 stc_alloc_log_max;
	u8 stc_alloc_log_gran;
	u8 rtc_log_depth_max;
	u8 format_select_gtpu_dw_0;
	u8 format_select_gtpu_dw_1;
	u8 flow_table_hash_type;
	u8 format_select_gtpu_dw_2;
	u8 format_select_gtpu_ext_dw_0;
	u8 access_index_mode;
	u32 linear_match_definer;
	bool full_dw_jumbo_support;
	bool rtc_hash_split_table;
	bool rtc_linear_lookup_table;
	u32 supp_type_gen_wqe;
	u8 rtc_max_hash_def_gen_wqe;
	u16 supp_ste_format_gen_wqe;
	struct mlx5hws_cmd_query_ft_caps nic_ft; /* NIC RX/TX table caps */
	struct mlx5hws_cmd_query_ft_caps fdb_ft; /* eswitch FDB table caps */
	bool eswitch_manager;
	bool merged_eswitch;
	u32 eswitch_manager_vport_number;
	u8 log_header_modify_argument_granularity;
	u8 log_header_modify_argument_max_alloc;
	u8 sq_ts_format;
	u8 fdb_tir_stc;
	u64 definer_format_sup;
	u32 trivial_match_definer;
	u32 vhca_id;
	u32 shared_vhca_id;
	char fw_ver[64]; /* "maj.min.sub" string built from FW revision */
	bool ipsec_offload;
	bool is_ecpf;
	u8 flex_parser_ok_bits_supp;
	u8 flex_parser_id_geneve_tlv_option_0;
	u8 flex_parser_id_mpls_over_gre;
	u8 flex_parser_id_mpls_over_udp;
};
-
/* Flow table commands */

int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev,
				  struct mlx5hws_cmd_ft_create_attr *ft_attr,
				  u32 *table_id);

int mlx5hws_cmd_flow_table_modify(struct mlx5_core_dev *mdev,
				  struct mlx5hws_cmd_ft_modify_attr *ft_attr,
				  u32 table_id);

int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev,
				 u32 obj_id,
				 struct mlx5hws_cmd_ft_query_attr *ft_attr,
				 u64 *icm_addr_0, u64 *icm_addr_1);

int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
				   u8 fw_ft_type, u32 table_id);

void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
					  u32 table_id);

/* RTC / STC / STE steering object commands */

int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
			   struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
			   u32 *rtc_id);

void mlx5hws_cmd_rtc_destroy(struct mlx5_core_dev *mdev, u32 rtc_id);

int mlx5hws_cmd_stc_create(struct mlx5_core_dev *mdev,
			   struct mlx5hws_cmd_stc_create_attr *stc_attr,
			   u32 *stc_id);

int mlx5hws_cmd_stc_modify(struct mlx5_core_dev *mdev,
			   u32 stc_id,
			   struct mlx5hws_cmd_stc_modify_attr *stc_attr);

void mlx5hws_cmd_stc_destroy(struct mlx5_core_dev *mdev, u32 stc_id);

int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev,
			     struct mlx5hws_cmd_generate_wqe_attr *attr,
			     struct mlx5_cqe64 *ret_cqe);

int mlx5hws_cmd_ste_create(struct mlx5_core_dev *mdev,
			   struct mlx5hws_cmd_ste_create_attr *ste_attr,
			   u32 *ste_id);

void mlx5hws_cmd_ste_destroy(struct mlx5_core_dev *mdev, u32 ste_id);

/* Definer and modify-header argument/pattern commands */

int mlx5hws_cmd_definer_create(struct mlx5_core_dev *mdev,
			       struct mlx5hws_cmd_definer_create_attr *def_attr,
			       u32 *definer_id);

void mlx5hws_cmd_definer_destroy(struct mlx5_core_dev *mdev,
				 u32 definer_id);

int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
			   u16 log_obj_range,
			   u32 pd,
			   u32 *arg_id);

void mlx5hws_cmd_arg_destroy(struct mlx5_core_dev *mdev,
			     u32 arg_id);

int mlx5hws_cmd_header_modify_pattern_create(struct mlx5_core_dev *mdev,
					     u32 pattern_length,
					     u8 *actions,
					     u32 *ptrn_id);

void mlx5hws_cmd_header_modify_pattern_destroy(struct mlx5_core_dev *mdev,
					       u32 ptrn_id);

int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
				       struct mlx5hws_cmd_packet_reformat_create_attr *attr,
				       u32 *reformat_id);

int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev,
					u32 reformat_id);

/* Flow table entry (FTE) helpers */

int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev,
			u32 table_type,
			u32 table_id,
			u32 group_id,
			struct mlx5hws_cmd_set_fte_attr *fte_attr);

int mlx5hws_cmd_delete_fte(struct mlx5_core_dev *mdev,
			   u32 table_type, u32 table_id);

struct mlx5hws_cmd_forward_tbl *
mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev,
			       struct mlx5hws_cmd_ft_create_attr *ft_attr,
			       struct mlx5hws_cmd_set_fte_attr *fte_attr);

void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev,
				     struct mlx5hws_cmd_forward_tbl *tbl);

/* Cross-vhca alias objects and capability queries */

int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
				 struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
				 u32 *obj_id);

int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
				  u16 obj_type,
				  u32 obj_id);

int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn);

int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
			   struct mlx5hws_cmd_query_caps *caps);

void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
					   u32 fw_ft_type,
					   enum mlx5hws_table_type type,
					   struct mlx5hws_cmd_ft_modify_attr *ft_attr);

int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
					struct mlx5hws_cmd_allow_other_vhca_access_attr *attr);

int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
			   u16 vport_number, u16 *gvmi);
-
-#endif /* MLX5HWS_CMD_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c
deleted file mode 100644 (file)
index 00e4fdf..0000000
+++ /dev/null
@@ -1,260 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved. */
-
-#include "mlx5hws_internal.h"
-
/* True when the device can reparse dynamically, i.e. only on the
 * specific actions that request it (STC-controlled) rather than on
 * every rule.
 */
bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx)
{
	return IS_BIT_SET(ctx->caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_BY_STC);
}
-
-u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx)
-{
-       /* Prefer to use dynamic reparse, reparse only specific actions */
-       if (mlx5hws_context_cap_dynamic_reparse(ctx))
-               return MLX5_IFC_RTC_REPARSE_NEVER;
-
-       /* Otherwise use less efficient static */
-       return MLX5_IFC_RTC_REPARSE_ALWAYS;
-}
-
/* Initialize the context-wide caches and STC pools:
 * pattern cache, definer cache, then one STC pool per table type.
 * On any failure everything created so far is unwound and the
 * error code is returned.
 */
static int hws_context_pools_init(struct mlx5hws_context *ctx)
{
	struct mlx5hws_pool_attr pool_attr = {0};
	u8 max_log_sz;
	int ret;
	int i;

	ret = mlx5hws_pat_init_pattern_cache(&ctx->pattern_cache);
	if (ret)
		return ret;

	ret = mlx5hws_definer_init_cache(&ctx->definer_cache);
	if (ret)
		goto uninit_pat_cache;

	/* Create an STC pool per FT type */
	pool_attr.pool_type = MLX5HWS_POOL_TYPE_STC;
	pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STC_POOL;
	/* Desired log size capped by the device max, but never below the
	 * device's allocation granularity.
	 */
	max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
	pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran);

	for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
		pool_attr.table_type = i;
		ctx->stc_pool[i] = mlx5hws_pool_create(ctx, &pool_attr);
		if (!ctx->stc_pool[i]) {
			mlx5hws_err(ctx, "Failed to allocate STC pool [%d]", i);
			ret = -ENOMEM;
			goto free_stc_pools;
		}
	}

	return 0;

free_stc_pools:
	for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++)
		if (ctx->stc_pool[i])
			mlx5hws_pool_destroy(ctx->stc_pool[i]);

	mlx5hws_definer_uninit_cache(ctx->definer_cache);
uninit_pat_cache:
	mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
	return ret;
}
-
-static void hws_context_pools_uninit(struct mlx5hws_context *ctx)
-{
-       int i;
-
-       for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
-               if (ctx->stc_pool[i])
-                       mlx5hws_pool_destroy(ctx->stc_pool[i]);
-       }
-
-       mlx5hws_definer_uninit_cache(ctx->definer_cache);
-       mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
-}
-
-static int hws_context_init_pd(struct mlx5hws_context *ctx)
-{
-       int ret = 0;
-
-       ret = mlx5_core_alloc_pd(ctx->mdev, &ctx->pd_num);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to allocate PD\n");
-               return ret;
-       }
-
-       ctx->flags |= MLX5HWS_CONTEXT_FLAG_PRIVATE_PD;
-
-       return 0;
-}
-
-static int hws_context_uninit_pd(struct mlx5hws_context *ctx)
-{
-       if (ctx->flags & MLX5HWS_CONTEXT_FLAG_PRIVATE_PD)
-               mlx5_core_dealloc_pd(ctx->mdev, ctx->pd_num);
-
-       return 0;
-}
-
/* Check every device/FW capability that HWS depends on, and set
 * MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT only when all are present.
 * On any missing capability the flag stays clear (HWS disabled)
 * and the reason is logged.
 */
static void hws_context_check_hws_supp(struct mlx5hws_context *ctx)
{
	struct mlx5hws_cmd_query_caps *caps = ctx->caps;

	/* HWS not supported on device / FW */
	if (!caps->wqe_based_update) {
		mlx5hws_err(ctx, "Required HWS WQE based insertion cap not supported\n");
		return;
	}

	if (!caps->eswitch_manager) {
		mlx5hws_err(ctx, "HWS is not supported for non eswitch manager port\n");
		return;
	}

	/* Current solution requires all rules to set reparse bit */
	if ((!caps->nic_ft.reparse ||
	     (!caps->fdb_ft.reparse && caps->eswitch_manager)) ||
	    !IS_BIT_SET(caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS)) {
		mlx5hws_err(ctx, "Required HWS reparse cap not supported\n");
		return;
	}

	/* FW/HW must support 8DW STE */
	if (!IS_BIT_SET(caps->ste_format, MLX5_IFC_RTC_STE_FORMAT_8DW)) {
		mlx5hws_err(ctx, "Required HWS STE format not supported\n");
		return;
	}

	/* Adding rules by hash and by offset are requirements */
	if (!IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH) ||
	    !IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET)) {
		mlx5hws_err(ctx, "Required HWS RTC update mode not supported\n");
		return;
	}

	/* Support for SELECT definer ID is required */
	if (!IS_BIT_SET(caps->definer_format_sup, MLX5_IFC_DEFINER_FORMAT_ID_SELECT)) {
		mlx5hws_err(ctx, "Required HWS Dynamic definer not supported\n");
		return;
	}

	ctx->flags |= MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT;
}
-
/* Bring up HWS on the context when the device supports it:
 * capability check, then PD, pools/caches and send queues.
 * Returns 0 when HWS is simply unsupported (the support flag stays
 * clear and nothing is allocated); on a real failure everything
 * initialized so far is unwound.
 */
static int hws_context_init_hws(struct mlx5hws_context *ctx,
				struct mlx5hws_context_attr *attr)
{
	int ret;

	hws_context_check_hws_supp(ctx);

	/* Lack of HWS support is not an error here */
	if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
		return 0;

	ret = hws_context_init_pd(ctx);
	if (ret)
		return ret;

	ret = hws_context_pools_init(ctx);
	if (ret)
		goto uninit_pd;

	if (attr->bwc)
		ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;

	ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size);
	if (ret)
		goto pools_uninit;

	INIT_LIST_HEAD(&ctx->tbl_list);

	return 0;

pools_uninit:
	hws_context_pools_uninit(ctx);
uninit_pd:
	hws_context_uninit_pd(ctx);
	return ret;
}
-
-static void hws_context_uninit_hws(struct mlx5hws_context *ctx)
-{
-       if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
-               return;
-
-       mlx5hws_send_queues_close(ctx);
-       hws_context_pools_uninit(ctx);
-       hws_context_uninit_pd(ctx);
-}
-
/* Open an HWS context on @mdev: query device capabilities, build the
 * vport GVMI table, bring up HWS resources (PD, pools, send queues)
 * when supported, and register the debugfs dump entries.
 * Returns the new context, or NULL on any failure (the specific
 * error code is not propagated to the caller).
 */
struct mlx5hws_context *mlx5hws_context_open(struct mlx5_core_dev *mdev,
					     struct mlx5hws_context_attr *attr)
{
	struct mlx5hws_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->mdev = mdev;

	mutex_init(&ctx->ctrl_lock);
	xa_init(&ctx->peer_ctx_xa);

	ctx->caps = kzalloc(sizeof(*ctx->caps), GFP_KERNEL);
	if (!ctx->caps)
		goto free_ctx;

	ret = mlx5hws_cmd_query_caps(mdev, ctx->caps);
	if (ret)
		goto free_caps;

	ret = mlx5hws_vport_init_vports(ctx);
	if (ret)
		goto free_caps;

	ret = hws_context_init_hws(ctx, attr);
	if (ret)
		goto uninit_vports;

	mlx5hws_debug_init_dump(ctx);

	return ctx;

uninit_vports:
	mlx5hws_vport_uninit_vports(ctx);
free_caps:
	kfree(ctx->caps);
free_ctx:
	xa_destroy(&ctx->peer_ctx_xa);
	mutex_destroy(&ctx->ctrl_lock);
	kfree(ctx);
	return NULL;
}
-
/* Tear down a context in reverse order of mlx5hws_context_open()
 * and free it. Always returns 0.
 */
int mlx5hws_context_close(struct mlx5hws_context *ctx)
{
	mlx5hws_debug_uninit_dump(ctx);
	hws_context_uninit_hws(ctx);
	mlx5hws_vport_uninit_vports(ctx);
	kfree(ctx->caps);
	xa_destroy(&ctx->peer_ctx_xa);
	mutex_destroy(&ctx->ctrl_lock);
	kfree(ctx);
	return 0;
}
-
-void mlx5hws_context_set_peer(struct mlx5hws_context *ctx,
-                             struct mlx5hws_context *peer_ctx,
-                             u16 peer_vhca_id)
-{
-       mutex_lock(&ctx->ctrl_lock);
-
-       if (xa_err(xa_store(&ctx->peer_ctx_xa, peer_vhca_id, peer_ctx, GFP_KERNEL)))
-               pr_warn("HWS: failed storing peer vhca ID in peer xarray\n");
-
-       mutex_unlock(&ctx->ctrl_lock);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h
deleted file mode 100644 (file)
index 8ab548a..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#ifndef MLX5HWS_CONTEXT_H_
-#define MLX5HWS_CONTEXT_H_
-
/* Context state flags (mlx5hws_context.flags). */
enum mlx5hws_context_flags {
	MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0, /* device/FW caps allow HWS */
	MLX5HWS_CONTEXT_FLAG_PRIVATE_PD = 1 << 1,  /* context owns its PD and must free it */
	MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT = 1 << 2, /* backward-compatible API enabled */
};

/* Indices of the shared STCs held in common_res. */
enum mlx5hws_context_shared_stc_type {
	MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3 = 0,
	MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP = 1,
	MLX5HWS_CONTEXT_SHARED_STC_MAX = 2,
};

/* Per-table-type resources shared across the whole context. */
struct mlx5hws_context_common_res {
	struct mlx5hws_action_default_stc *default_stc;
	struct mlx5hws_action_shared_stc *shared_stc[MLX5HWS_CONTEXT_SHARED_STC_MAX];
	struct mlx5hws_cmd_forward_tbl *default_miss;
};

/* debugfs directories used by the steering dump. */
struct mlx5hws_context_debug_info {
	struct dentry *steering_debugfs;
	struct dentry *fdb_debugfs;
};

/* Cached eswitch-manager/uplink GVMIs plus a per-vport GVMI lookup. */
struct mlx5hws_context_vports {
	u16 esw_manager_gvmi;
	u16 uplink_gvmi;
	struct xarray vport_gvmi_xa;
};
-
/* Top-level HWS state for one mlx5 device. */
struct mlx5hws_context {
	struct mlx5_core_dev *mdev;
	struct mlx5hws_cmd_query_caps *caps; /* queried device capabilities */
	u32 pd_num;                          /* protection domain number */
	struct mlx5hws_pool *stc_pool[MLX5HWS_TABLE_TYPE_MAX];
	struct mlx5hws_context_common_res common_res[MLX5HWS_TABLE_TYPE_MAX];
	struct mlx5hws_pattern_cache *pattern_cache;
	struct mlx5hws_definer_cache *definer_cache;
	struct mutex ctrl_lock; /* control lock to protect the whole context */
	enum mlx5hws_context_flags flags;
	struct mlx5hws_send_engine *send_queue;
	size_t queues;                      /* number of send queues */
	struct mutex *bwc_send_queue_locks; /* protect BWC queues */
	struct lock_class_key *bwc_lock_class_keys;
	struct list_head tbl_list;          /* tables created on this context */
	struct mlx5hws_context_debug_info debug_info;
	struct xarray peer_ctx_xa;          /* peer contexts keyed by vhca id */
	struct mlx5hws_context_vports vports;
};
-
-static inline bool mlx5hws_context_bwc_supported(struct mlx5hws_context *ctx)
-{
-       return ctx->flags & MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
-}
-
-bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx);
-
-u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx);
-
-#endif /* MLX5HWS_CONTEXT_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c
deleted file mode 100644 (file)
index 2b8c5a4..0000000
+++ /dev/null
@@ -1,480 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#include <linux/debugfs.h>
-#include <linux/kernel.h>
-#include <linux/seq_file.h>
-#include <linux/version.h>
-#include "mlx5hws_internal.h"
-
-static int
-hws_debug_dump_matcher_template_definer(struct seq_file *f,
-                                       void *parent_obj,
-                                       struct mlx5hws_definer *definer,
-                                       enum mlx5hws_debug_res_type type)
-{
-       int i;
-
-       if (!definer)
-               return 0;
-
-       seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,",
-                  type,
-                  HWS_PTR_TO_ID(definer),
-                  HWS_PTR_TO_ID(parent_obj),
-                  definer->obj_id,
-                  definer->type);
-
-       for (i = 0; i < DW_SELECTORS; i++)
-               seq_printf(f, "0x%x%s", definer->dw_selector[i],
-                          (i == DW_SELECTORS - 1) ? "," : "-");
-
-       for (i = 0; i < BYTE_SELECTORS; i++)
-               seq_printf(f, "0x%x%s", definer->byte_selector[i],
-                          (i == BYTE_SELECTORS - 1) ? "," : "-");
-
-       for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++)
-               seq_printf(f, "%02x", definer->mask.jumbo[i]);
-
-       seq_puts(f, "\n");
-
-       return 0;
-}
-
-static int
-hws_debug_dump_matcher_match_template(struct seq_file *f, struct mlx5hws_matcher *matcher)
-{
-       enum mlx5hws_debug_res_type type;
-       int i, ret;
-
-       for (i = 0; i < matcher->num_of_mt; i++) {
-               struct mlx5hws_match_template *mt = &matcher->mt[i];
-
-               seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d\n",
-                          MLX5HWS_DEBUG_RES_TYPE_MATCHER_MATCH_TEMPLATE,
-                          HWS_PTR_TO_ID(mt),
-                          HWS_PTR_TO_ID(matcher),
-                          mt->fc_sz,
-                          0, 0);
-
-               type = MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_MATCH_DEFINER;
-               ret = hws_debug_dump_matcher_template_definer(f, mt, mt->definer, type);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static int
-hws_debug_dump_matcher_action_template(struct seq_file *f, struct mlx5hws_matcher *matcher)
-{
-       enum mlx5hws_action_type action_type;
-       int i, j;
-
-       for (i = 0; i < matcher->num_of_at; i++) {
-               struct mlx5hws_action_template *at = &matcher->at[i];
-
-               seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d",
-                          MLX5HWS_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE,
-                          HWS_PTR_TO_ID(at),
-                          HWS_PTR_TO_ID(matcher),
-                          at->only_term,
-                          at->num_of_action_stes,
-                          at->num_actions);
-
-               for (j = 0; j < at->num_actions; j++) {
-                       action_type = at->action_type_arr[j];
-                       seq_printf(f, ",%s", mlx5hws_action_type_to_str(action_type));
-               }
-
-               seq_puts(f, "\n");
-       }
-
-       return 0;
-}
-
-static int
-hws_debug_dump_matcher_attr(struct seq_file *f, struct mlx5hws_matcher *matcher)
-{
-       struct mlx5hws_matcher_attr *attr = &matcher->attr;
-
-       seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d\n",
-                  MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR,
-                  HWS_PTR_TO_ID(matcher),
-                  attr->priority,
-                  attr->mode,
-                  attr->table.sz_row_log,
-                  attr->table.sz_col_log,
-                  attr->optimize_using_rule_idx,
-                  attr->optimize_flow_src,
-                  attr->insert_mode,
-                  attr->distribute_mode);
-
-       return 0;
-}
-
-static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *matcher)
-{
-       enum mlx5hws_table_type tbl_type = matcher->tbl->type;
-       struct mlx5hws_cmd_ft_query_attr ft_attr = {0};
-       struct mlx5hws_pool_chunk *ste;
-       struct mlx5hws_pool *ste_pool;
-       u64 icm_addr_0 = 0;
-       u64 icm_addr_1 = 0;
-       u32 ste_0_id = -1;
-       u32 ste_1_id = -1;
-       int ret;
-
-       seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,0x%llx",
-                  MLX5HWS_DEBUG_RES_TYPE_MATCHER,
-                  HWS_PTR_TO_ID(matcher),
-                  HWS_PTR_TO_ID(matcher->tbl),
-                  matcher->num_of_mt,
-                  matcher->end_ft_id,
-                  matcher->col_matcher ? HWS_PTR_TO_ID(matcher->col_matcher) : 0);
-
-       ste = &matcher->match_ste.ste;
-       ste_pool = matcher->match_ste.pool;
-       if (ste_pool) {
-               ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
-               if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
-                       ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
-       }
-
-       seq_printf(f, ",%d,%d,%d,%d",
-                  matcher->match_ste.rtc_0_id,
-                  (int)ste_0_id,
-                  matcher->match_ste.rtc_1_id,
-                  (int)ste_1_id);
-
-       ste = &matcher->action_ste[0].ste;
-       ste_pool = matcher->action_ste[0].pool;
-       if (ste_pool) {
-               ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
-               if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
-                       ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
-               else
-                       ste_1_id = -1;
-       } else {
-               ste_0_id = -1;
-               ste_1_id = -1;
-       }
-
-       ft_attr.type = matcher->tbl->fw_ft_type;
-       ret = mlx5hws_cmd_flow_table_query(matcher->tbl->ctx->mdev,
-                                          matcher->end_ft_id,
-                                          &ft_attr,
-                                          &icm_addr_0,
-                                          &icm_addr_1);
-       if (ret)
-               return ret;
-
-       seq_printf(f, ",%d,%d,%d,%d,%d,0x%llx,0x%llx\n",
-                  matcher->action_ste[0].rtc_0_id,
-                  (int)ste_0_id,
-                  matcher->action_ste[0].rtc_1_id,
-                  (int)ste_1_id,
-                  0,
-                  mlx5hws_debug_icm_to_idx(icm_addr_0),
-                  mlx5hws_debug_icm_to_idx(icm_addr_1));
-
-       ret = hws_debug_dump_matcher_attr(f, matcher);
-       if (ret)
-               return ret;
-
-       ret = hws_debug_dump_matcher_match_template(f, matcher);
-       if (ret)
-               return ret;
-
-       ret = hws_debug_dump_matcher_action_template(f, matcher);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int hws_debug_dump_table(struct seq_file *f, struct mlx5hws_table *tbl)
-{
-       struct mlx5hws_cmd_ft_query_attr ft_attr = {0};
-       struct mlx5hws_matcher *matcher;
-       u64 local_icm_addr_0 = 0;
-       u64 local_icm_addr_1 = 0;
-       u64 icm_addr_0 = 0;
-       u64 icm_addr_1 = 0;
-       int ret;
-
-       seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d,%d,%d",
-                  MLX5HWS_DEBUG_RES_TYPE_TABLE,
-                  HWS_PTR_TO_ID(tbl),
-                  HWS_PTR_TO_ID(tbl->ctx),
-                  tbl->ft_id,
-                  MLX5HWS_TABLE_TYPE_BASE + tbl->type,
-                  tbl->fw_ft_type,
-                  tbl->level,
-                  0);
-
-       ft_attr.type = tbl->fw_ft_type;
-       ret = mlx5hws_cmd_flow_table_query(tbl->ctx->mdev,
-                                          tbl->ft_id,
-                                          &ft_attr,
-                                          &icm_addr_0,
-                                          &icm_addr_1);
-       if (ret)
-               return ret;
-
-       seq_printf(f, ",0x%llx,0x%llx,0x%llx,0x%llx,0x%llx\n",
-                  mlx5hws_debug_icm_to_idx(icm_addr_0),
-                  mlx5hws_debug_icm_to_idx(icm_addr_1),
-                  mlx5hws_debug_icm_to_idx(local_icm_addr_0),
-                  mlx5hws_debug_icm_to_idx(local_icm_addr_1),
-                  HWS_PTR_TO_ID(tbl->default_miss.miss_tbl));
-
-       list_for_each_entry(matcher, &tbl->matchers_list, list_node) {
-               ret = hws_debug_dump_matcher(f, matcher);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static int
-hws_debug_dump_context_send_engine(struct seq_file *f, struct mlx5hws_context *ctx)
-{
-       struct mlx5hws_send_engine *send_queue;
-       struct mlx5hws_send_ring *send_ring;
-       struct mlx5hws_send_ring_cq *cq;
-       struct mlx5hws_send_ring_sq *sq;
-       int i;
-
-       for (i = 0; i < (int)ctx->queues; i++) {
-               send_queue = &ctx->send_queue[i];
-               seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
-                          MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_ENGINE,
-                          HWS_PTR_TO_ID(ctx),
-                          i,
-                          send_queue->used_entries,
-                          send_queue->num_entries,
-                          1, /* one send ring per queue */
-                          send_queue->num_entries,
-                          send_queue->err,
-                          send_queue->completed.ci,
-                          send_queue->completed.pi,
-                          send_queue->completed.mask);
-
-               send_ring = &send_queue->send_ring;
-               cq = &send_ring->send_cq;
-               sq = &send_ring->send_sq;
-
-               seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
-                          MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_RING,
-                          HWS_PTR_TO_ID(ctx),
-                          0, /* one send ring per send queue */
-                          i,
-                          cq->mcq.cqn,
-                          0,
-                          0,
-                          0,
-                          0,
-                          0,
-                          0,
-                          cq->mcq.cqe_sz,
-                          sq->sqn,
-                          0,
-                          0,
-                          0);
-       }
-
-       return 0;
-}
-
-static int hws_debug_dump_context_caps(struct seq_file *f, struct mlx5hws_context *ctx)
-{
-       struct mlx5hws_cmd_query_caps *caps = ctx->caps;
-
-       seq_printf(f, "%d,0x%llx,%s,%d,%d,%d,%d,",
-                  MLX5HWS_DEBUG_RES_TYPE_CONTEXT_CAPS,
-                  HWS_PTR_TO_ID(ctx),
-                  caps->fw_ver,
-                  caps->wqe_based_update,
-                  caps->ste_format,
-                  caps->ste_alloc_log_max,
-                  caps->log_header_modify_argument_max_alloc);
-
-       seq_printf(f, "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%s\n",
-                  caps->flex_protocols,
-                  caps->rtc_reparse_mode,
-                  caps->rtc_index_mode,
-                  caps->ste_alloc_log_gran,
-                  caps->stc_alloc_log_max,
-                  caps->stc_alloc_log_gran,
-                  caps->rtc_log_depth_max,
-                  caps->format_select_gtpu_dw_0,
-                  caps->format_select_gtpu_dw_1,
-                  caps->format_select_gtpu_dw_2,
-                  caps->format_select_gtpu_ext_dw_0,
-                  caps->nic_ft.max_level,
-                  caps->nic_ft.reparse,
-                  caps->fdb_ft.max_level,
-                  caps->fdb_ft.reparse,
-                  caps->log_header_modify_argument_granularity,
-                  caps->linear_match_definer,
-                  "regc_3");
-
-       return 0;
-}
-
-static int hws_debug_dump_context_attr(struct seq_file *f, struct mlx5hws_context *ctx)
-{
-       seq_printf(f, "%u,0x%llx,%d,%zu,%d,%s,%d,%d\n",
-                  MLX5HWS_DEBUG_RES_TYPE_CONTEXT_ATTR,
-                  HWS_PTR_TO_ID(ctx),
-                  ctx->pd_num,
-                  ctx->queues,
-                  ctx->send_queue->num_entries,
-                  "None", /* no shared gvmi */
-                  ctx->caps->vhca_id,
-                  0xffff); /* no shared gvmi */
-
-       return 0;
-}
-
-static int hws_debug_dump_context_info(struct seq_file *f, struct mlx5hws_context *ctx)
-{
-       struct mlx5_core_dev *dev = ctx->mdev;
-       int ret;
-
-       seq_printf(f, "%d,0x%llx,%d,%s,%s.KERNEL_%u_%u_%u\n",
-                  MLX5HWS_DEBUG_RES_TYPE_CONTEXT,
-                  HWS_PTR_TO_ID(ctx),
-                  ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT,
-                  pci_name(dev->pdev),
-                  HWS_DEBUG_FORMAT_VERSION,
-                  LINUX_VERSION_MAJOR,
-                  LINUX_VERSION_PATCHLEVEL,
-                  LINUX_VERSION_SUBLEVEL);
-
-       ret = hws_debug_dump_context_attr(f, ctx);
-       if (ret)
-               return ret;
-
-       ret = hws_debug_dump_context_caps(f, ctx);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int hws_debug_dump_context_stc_resource(struct seq_file *f,
-                                              struct mlx5hws_context *ctx,
-                                              u32 tbl_type,
-                                              struct mlx5hws_pool_resource *resource)
-{
-       seq_printf(f, "%d,0x%llx,%u,%u\n",
-                  MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC,
-                  HWS_PTR_TO_ID(ctx),
-                  tbl_type,
-                  resource->base_id);
-
-       return 0;
-}
-
-static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context *ctx)
-{
-       struct mlx5hws_pool *stc_pool;
-       u32 table_type;
-       int ret;
-       int i;
-
-       for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
-               stc_pool = ctx->stc_pool[i];
-               table_type = MLX5HWS_TABLE_TYPE_BASE + i;
-
-               if (!stc_pool)
-                       continue;
-
-               if (stc_pool->resource[0]) {
-                       ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
-                                                                 stc_pool->resource[0]);
-                       if (ret)
-                               return ret;
-               }
-
-               if (i == MLX5HWS_TABLE_TYPE_FDB && stc_pool->mirror_resource[0]) {
-                       ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
-                                                                 stc_pool->mirror_resource[0]);
-                       if (ret)
-                               return ret;
-               }
-       }
-
-       return 0;
-}
-
-static int hws_debug_dump_context(struct seq_file *f, struct mlx5hws_context *ctx)
-{
-       struct mlx5hws_table *tbl;
-       int ret;
-
-       ret = hws_debug_dump_context_info(f, ctx);
-       if (ret)
-               return ret;
-
-       ret = hws_debug_dump_context_send_engine(f, ctx);
-       if (ret)
-               return ret;
-
-       ret = hws_debug_dump_context_stc(f, ctx);
-       if (ret)
-               return ret;
-
-       list_for_each_entry(tbl, &ctx->tbl_list, tbl_list_node) {
-               ret = hws_debug_dump_table(f, tbl);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static int
-hws_debug_dump(struct seq_file *f, struct mlx5hws_context *ctx)
-{
-       int ret;
-
-       if (!f || !ctx)
-               return -EINVAL;
-
-       mutex_lock(&ctx->ctrl_lock);
-       ret = hws_debug_dump_context(f, ctx);
-       mutex_unlock(&ctx->ctrl_lock);
-
-       return ret;
-}
-
-static int hws_dump_show(struct seq_file *file, void *priv)
-{
-       return hws_debug_dump(file, file->private);
-}
-DEFINE_SHOW_ATTRIBUTE(hws_dump);
-
-void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx)
-{
-       struct mlx5_core_dev *dev = ctx->mdev;
-       char file_name[128];
-
-       ctx->debug_info.steering_debugfs =
-               debugfs_create_dir("steering", mlx5_debugfs_get_dev_root(dev));
-       ctx->debug_info.fdb_debugfs =
-               debugfs_create_dir("fdb", ctx->debug_info.steering_debugfs);
-
-       sprintf(file_name, "ctx_%p", ctx);
-       debugfs_create_file(file_name, 0444, ctx->debug_info.fdb_debugfs,
-                           ctx, &hws_dump_fops);
-}
-
-void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx)
-{
-       debugfs_remove_recursive(ctx->debug_info.steering_debugfs);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h
deleted file mode 100644 (file)
index b93a536..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#ifndef MLX5HWS_DEBUG_H_
-#define MLX5HWS_DEBUG_H_
-
-#define HWS_DEBUG_FORMAT_VERSION "1.0"
-
-#define HWS_PTR_TO_ID(p) ((u64)(uintptr_t)(p) & 0xFFFFFFFFULL)
-
-enum mlx5hws_debug_res_type {
-       MLX5HWS_DEBUG_RES_TYPE_CONTEXT = 4000,
-       MLX5HWS_DEBUG_RES_TYPE_CONTEXT_ATTR = 4001,
-       MLX5HWS_DEBUG_RES_TYPE_CONTEXT_CAPS = 4002,
-       MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_ENGINE = 4003,
-       MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_RING = 4004,
-       MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC = 4005,
-
-       MLX5HWS_DEBUG_RES_TYPE_TABLE = 4100,
-
-       MLX5HWS_DEBUG_RES_TYPE_MATCHER = 4200,
-       MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR = 4201,
-       MLX5HWS_DEBUG_RES_TYPE_MATCHER_MATCH_TEMPLATE = 4202,
-       MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_MATCH_DEFINER = 4203,
-       MLX5HWS_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE = 4204,
-       MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_HASH_DEFINER = 4205,
-       MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_RANGE_DEFINER = 4206,
-       MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_COMPARE_MATCH_DEFINER = 4207,
-};
-
-static inline u64
-mlx5hws_debug_icm_to_idx(u64 icm_addr)
-{
-       return (icm_addr >> 6) & 0xffffffff;
-}
-
-void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx);
-void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx);
-
-#endif /* MLX5HWS_DEBUG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
deleted file mode 100644 (file)
index 3f4c58b..0000000
+++ /dev/null
@@ -1,2146 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#include "mlx5hws_internal.h"
-
-/* Pattern tunnel Layer bits. */
-#define MLX5_FLOW_LAYER_VXLAN      BIT(12)
-#define MLX5_FLOW_LAYER_VXLAN_GPE  BIT(13)
-#define MLX5_FLOW_LAYER_GRE        BIT(14)
-#define MLX5_FLOW_LAYER_MPLS       BIT(15)
-
-/* Pattern tunnel Layer bits (continued). */
-#define MLX5_FLOW_LAYER_IPIP       BIT(23)
-#define MLX5_FLOW_LAYER_IPV6_ENCAP BIT(24)
-#define MLX5_FLOW_LAYER_NVGRE      BIT(25)
-#define MLX5_FLOW_LAYER_GENEVE     BIT(26)
-
-#define MLX5_FLOW_ITEM_FLEX_TUNNEL BIT_ULL(39)
-
-/* Tunnel Masks. */
-#define MLX5_FLOW_LAYER_TUNNEL \
-       (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
-        MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
-        MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
-        MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
-        MLX5_FLOW_ITEM_FLEX_TUNNEL)
-
-#define GTP_PDU_SC     0x85
-#define BAD_PORT       0xBAD
-#define ETH_TYPE_IPV4_VXLAN    0x0800
-#define ETH_TYPE_IPV6_VXLAN    0x86DD
-#define UDP_GTPU_PORT  2152
-#define UDP_PORT_MPLS  6635
-#define UDP_GENEVE_PORT 6081
-#define UDP_ROCEV2_PORT        4791
-#define HWS_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS)
-
-#define STE_NO_VLAN    0x0
-#define STE_SVLAN      0x1
-#define STE_CVLAN      0x2
-#define STE_NO_L3      0x0
-#define STE_IPV4       0x1
-#define STE_IPV6       0x2
-#define STE_NO_L4      0x0
-#define STE_TCP                0x1
-#define STE_UDP                0x2
-#define STE_ICMP       0x3
-#define STE_ESP                0x3
-
-#define IPV4 0x4
-#define IPV6 0x6
-
-/* Setter function based on bit offset and mask, for 32bit DW */
-#define _HWS_SET32(p, v, byte_off, bit_off, mask) \
-       do { \
-               u32 _v = v; \
-               *((__be32 *)(p) + ((byte_off) / 4)) = \
-               cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + \
-                            ((byte_off) / 4))) & \
-                            (~((mask) << (bit_off)))) | \
-                           (((_v) & (mask)) << \
-                             (bit_off))); \
-       } while (0)
-
-/* Setter function based on bit offset and mask, for unaligned 32bit DW */
-#define HWS_SET32(p, v, byte_off, bit_off, mask) \
-       do { \
-               if (unlikely((bit_off) < 0)) { \
-                       u32 _bit_off = -1 * (bit_off); \
-                       u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \
-                       _HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
-                       _HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
-                                   (bit_off) % BITS_IN_DW, second_dw_mask); \
-               } else { \
-                       _HWS_SET32(p, v, byte_off, (bit_off), (mask)); \
-               } \
-       } while (0)
-
-/* Getter for up to aligned 32bit DW */
-#define HWS_GET32(p, byte_off, bit_off, mask) \
-       ((be32_to_cpu(*((__be32 *)(p) + ((byte_off) / 4))) >> (bit_off)) & (mask))
-
-#define HWS_CALC_FNAME(field, inner) \
-       ((inner) ? MLX5HWS_DEFINER_FNAME_##field##_I : \
-                  MLX5HWS_DEFINER_FNAME_##field##_O)
-
-#define HWS_GET_MATCH_PARAM(match_param, hdr) \
-       MLX5_GET(fte_match_param, match_param, hdr)
-
-#define HWS_IS_FLD_SET(match_param, hdr) \
-       (!!(HWS_GET_MATCH_PARAM(match_param, hdr)))
-
-#define HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) ({ \
-               BUILD_BUG_ON((sz_in_bits) % 32); \
-               u32 sz = sz_in_bits; \
-               u32 res = 0; \
-               u32 dw_off = __mlx5_dw_off(fte_match_param, hdr); \
-               while (!res && sz >= 32) { \
-                       res = *((match_param) + (dw_off++)); \
-                       sz -= 32; \
-               } \
-               res; \
-       })
-
-#define HWS_IS_FLD_SET_SZ(match_param, hdr, sz_in_bits) \
-       (((sz_in_bits) > 32) ? HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) : \
-                              !!(HWS_GET_MATCH_PARAM(match_param, hdr)))
-
-#define HWS_GET64_MATCH_PARAM(match_param, hdr) \
-       MLX5_GET64(fte_match_param, match_param, hdr)
-
-#define HWS_IS_FLD64_SET(match_param, hdr) \
-       (!!(HWS_GET64_MATCH_PARAM(match_param, hdr)))
-
-#define HWS_CALC_HDR_SRC(fc, s_hdr) \
-       do { \
-               (fc)->s_bit_mask = __mlx5_mask(fte_match_param, s_hdr); \
-               (fc)->s_bit_off = __mlx5_dw_bit_off(fte_match_param, s_hdr); \
-               (fc)->s_byte_off = MLX5_BYTE_OFF(fte_match_param, s_hdr); \
-       } while (0)
-
-#define HWS_CALC_HDR_DST(fc, d_hdr) \
-       do { \
-               (fc)->bit_mask = __mlx5_mask(definer_hl, d_hdr); \
-               (fc)->bit_off = __mlx5_dw_bit_off(definer_hl, d_hdr); \
-               (fc)->byte_off = MLX5_BYTE_OFF(definer_hl, d_hdr); \
-       } while (0)
-
-#define HWS_CALC_HDR(fc, s_hdr, d_hdr) \
-       do { \
-               HWS_CALC_HDR_SRC(fc, s_hdr); \
-               HWS_CALC_HDR_DST(fc, d_hdr); \
-               (fc)->tag_set = &hws_definer_generic_set; \
-       } while (0)
-
-#define HWS_SET_HDR(fc_arr, match_param, fname, s_hdr, d_hdr) \
-       do { \
-               if (HWS_IS_FLD_SET(match_param, s_hdr)) \
-                       HWS_CALC_HDR(&(fc_arr)[MLX5HWS_DEFINER_FNAME_##fname], s_hdr, d_hdr); \
-       } while (0)
-
-struct mlx5hws_definer_sel_ctrl {
-       u8 allowed_full_dw; /* Full DW selectors cover all offsets */
-       u8 allowed_lim_dw;  /* Limited DW selectors cover offset < 64 */
-       u8 allowed_bytes;   /* Bytes selectors, up to offset 255 */
-       u8 used_full_dw;
-       u8 used_lim_dw;
-       u8 used_bytes;
-       u8 full_dw_selector[DW_SELECTORS];
-       u8 lim_dw_selector[DW_SELECTORS_LIMITED];
-       u8 byte_selector[BYTE_SELECTORS];
-};
-
-struct mlx5hws_definer_conv_data {
-       struct mlx5hws_context *ctx;
-       struct mlx5hws_definer_fc *fc;
-       /* enum mlx5hws_definer_match_flag */
-       u32 match_flags;
-};
-
-static void
-hws_definer_ones_set(struct mlx5hws_definer_fc *fc,
-                    void *match_param,
-                    u8 *tag)
-{
-       HWS_SET32(tag, -1, fc->byte_off, fc->bit_off, fc->bit_mask);
-}
-
-static void
-hws_definer_generic_set(struct mlx5hws_definer_fc *fc,
-                       void *match_param,
-                       u8 *tag)
-{
-       /* Can be optimized */
-       u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);
-
-       HWS_SET32(tag, val, fc->byte_off, fc->bit_off, fc->bit_mask);
-}
-
-static void
-hws_definer_outer_vlan_type_set(struct mlx5hws_definer_fc *fc,
-                               void *match_param,
-                               u8 *tag)
-{
-       if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag))
-               HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
-       else if (HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag))
-               HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
-       else
-               HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
-}
-
-static void
-hws_definer_inner_vlan_type_set(struct mlx5hws_definer_fc *fc,
-                               void *match_param,
-                               u8 *tag)
-{
-       if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag))
-               HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
-       else if (HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag))
-               HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
-       else
-               HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
-}
-
-static void
-hws_definer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
-                                void *match_param,
-                                u8 *tag,
-                                bool inner)
-{
-       u32 second_cvlan_tag = inner ?
-               HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) :
-               HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag);
-       u32 second_svlan_tag = inner ?
-               HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag) :
-               HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag);
-
-       if (second_cvlan_tag)
-               HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
-       else if (second_svlan_tag)
-               HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
-       else
-               HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
-}
-
-static void
-hws_definer_inner_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
-                                      void *match_param,
-                                      u8 *tag)
-{
-       hws_definer_second_vlan_type_set(fc, match_param, tag, true);
-}
-
-static void
-hws_definer_outer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
-                                      void *match_param,
-                                      u8 *tag)
-{
-       hws_definer_second_vlan_type_set(fc, match_param, tag, false);
-}
-
-static void hws_definer_icmp_dw1_set(struct mlx5hws_definer_fc *fc,
-                                    void *match_param,
-                                    u8 *tag)
-{
-       u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_code);
-       u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_type);
-       u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
-                (code << __mlx5_dw_bit_off(header_icmp, code));
-
-       HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
-}
-
-static void
-hws_definer_icmpv6_dw1_set(struct mlx5hws_definer_fc *fc,
-                          void *match_param,
-                          u8 *tag)
-{
-       u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_code);
-       u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_type);
-       u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
-                (code << __mlx5_dw_bit_off(header_icmp, code));
-
-       HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
-}
-
-static void
-hws_definer_l3_type_set(struct mlx5hws_definer_fc *fc,
-                       void *match_param,
-                       u8 *tag)
-{
-       u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);
-
-       if (val == IPV4)
-               HWS_SET32(tag, STE_IPV4, fc->byte_off, fc->bit_off, fc->bit_mask);
-       else if (val == IPV6)
-               HWS_SET32(tag, STE_IPV6, fc->byte_off, fc->bit_off, fc->bit_mask);
-       else
-               HWS_SET32(tag, STE_NO_L3, fc->byte_off, fc->bit_off, fc->bit_mask);
-}
-
-static void
-hws_definer_set_source_port_gvmi(struct mlx5hws_definer_fc *fc,
-                                void *match_param,
-                                u8 *tag,
-                                struct mlx5hws_context *peer_ctx)
-{
-       u16 source_port = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port);
-       u16 vport_gvmi = 0;
-       int ret;
-
-       ret = mlx5hws_vport_get_gvmi(peer_ctx, source_port, &vport_gvmi);
-       if (ret) {
-               HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
-               mlx5hws_err(fc->ctx, "Vport 0x%x is disabled or invalid\n", source_port);
-               return;
-       }
-
-       if (vport_gvmi)
-               HWS_SET32(tag, vport_gvmi, fc->byte_off, fc->bit_off, fc->bit_mask);
-}
-
-static void
-hws_definer_set_source_gvmi_vhca_id(struct mlx5hws_definer_fc *fc,
-                                   void *match_param,
-                                   u8 *tag)
-__must_hold(&fc->ctx->ctrl_lock)
-{
-       int id = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_eswitch_owner_vhca_id);
-       struct mlx5hws_context *peer_ctx;
-
-       if (id == fc->ctx->caps->vhca_id)
-               peer_ctx = fc->ctx;
-       else
-               peer_ctx = xa_load(&fc->ctx->peer_ctx_xa, id);
-
-       if (!peer_ctx) {
-               HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
-               mlx5hws_err(fc->ctx, "Invalid vhca_id provided 0x%x\n", id);
-               return;
-       }
-
-       hws_definer_set_source_port_gvmi(fc, match_param, tag, peer_ctx);
-}
-
-static void
-hws_definer_set_source_gvmi(struct mlx5hws_definer_fc *fc,
-                           void *match_param,
-                           u8 *tag)
-{
-       hws_definer_set_source_port_gvmi(fc, match_param, tag, fc->ctx);
-}
-
-static struct mlx5hws_definer_fc *
-hws_definer_flex_parser_steering_ok_bits_handler(struct mlx5hws_definer_conv_data *cd,
-                                                u8 parser_id)
-{
-       struct mlx5hws_definer_fc *fc;
-
-       switch (parser_id) {
-       case 0:
-               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK];
-               HWS_CALC_HDR_DST(fc, oks1.flex_parser0_steering_ok);
-               fc->tag_set = &hws_definer_generic_set;
-               break;
-       case 1:
-               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK];
-               HWS_CALC_HDR_DST(fc, oks1.flex_parser1_steering_ok);
-               fc->tag_set = &hws_definer_generic_set;
-               break;
-       case 2:
-               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK];
-               HWS_CALC_HDR_DST(fc, oks1.flex_parser2_steering_ok);
-               fc->tag_set = &hws_definer_generic_set;
-               break;
-       case 3:
-               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK];
-               HWS_CALC_HDR_DST(fc, oks1.flex_parser3_steering_ok);
-               fc->tag_set = &hws_definer_generic_set;
-               break;
-       case 4:
-               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK];
-               HWS_CALC_HDR_DST(fc, oks1.flex_parser4_steering_ok);
-               fc->tag_set = &hws_definer_generic_set;
-               break;
-       case 5:
-               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK];
-               HWS_CALC_HDR_DST(fc, oks1.flex_parser5_steering_ok);
-               fc->tag_set = &hws_definer_generic_set;
-               break;
-       case 6:
-               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK];
-               HWS_CALC_HDR_DST(fc, oks1.flex_parser6_steering_ok);
-               fc->tag_set = &hws_definer_generic_set;
-               break;
-       case 7:
-               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK];
-               HWS_CALC_HDR_DST(fc, oks1.flex_parser7_steering_ok);
-               fc->tag_set = &hws_definer_generic_set;
-               break;
-       default:
-               mlx5hws_err(cd->ctx, "Unsupported flex parser steering ok index %u\n", parser_id);
-               return NULL;
-       }
-
-       return fc;
-}
-
-static struct mlx5hws_definer_fc *
-hws_definer_flex_parser_handler(struct mlx5hws_definer_conv_data *cd,
-                               u8 parser_id)
-{
-       struct mlx5hws_definer_fc *fc;
-
-       switch (parser_id) {
-       case 0:
-               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0];
-               HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_0);
-               fc->tag_set = &hws_definer_generic_set;
-               break;
-       case 1:
-               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1];
-               HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_1);
-               fc->tag_set = &hws_definer_generic_set;
-               break;
-       case 2:
-               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2];
-               HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_2);
-               fc->tag_set = &hws_definer_generic_set;
-               break;
-       case 3:
-               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3];
-               HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_3);
-               fc->tag_set = &hws_definer_generic_set;
-               break;
-       case 4:
-               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4];
-               HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_4);
-               fc->tag_set = &hws_definer_generic_set;
-               break;
-       case 5:
-               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5];
-               HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_5);
-               fc->tag_set = &hws_definer_generic_set;
-               break;
-       case 6:
-               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6];
-               HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_6);
-               fc->tag_set = &hws_definer_generic_set;
-               break;
-       case 7:
-               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7];
-               HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_7);
-               fc->tag_set = &hws_definer_generic_set;
-               break;
-       default:
-               mlx5hws_err(cd->ctx, "Unsupported flex parser %u\n", parser_id);
-               return NULL;
-       }
-
-       return fc;
-}
-
-static struct mlx5hws_definer_fc *
-hws_definer_misc4_fields_handler(struct mlx5hws_definer_conv_data *cd,
-                                bool *parser_is_used,
-                                u32 id,
-                                u32 value)
-{
-       if (id || value) {
-               if (id >= HWS_NUM_OF_FLEX_PARSERS) {
-                       mlx5hws_err(cd->ctx, "Unsupported parser id\n");
-                       return NULL;
-               }
-
-               if (parser_is_used[id]) {
-                       mlx5hws_err(cd->ctx, "Parser id have been used\n");
-                       return NULL;
-               }
-       }
-
-       parser_is_used[id] = true;
-
-       return hws_definer_flex_parser_handler(cd, id);
-}
-
-static int
-hws_definer_check_match_flags(struct mlx5hws_definer_conv_data *cd)
-{
-       u32 flags;
-
-       flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE |
-                                  MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE |
-                                  MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU |
-                                  MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
-                                  MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN |
-                                  MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1);
-       if (flags & (flags - 1))
-               goto err_conflict;
-
-       flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY |
-                                  MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2);
-
-       if (flags & (flags - 1))
-               goto err_conflict;
-
-       flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE |
-                                  MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP);
-       if (flags & (flags - 1))
-               goto err_conflict;
-
-       flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 |
-                                  MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 |
-                                  MLX5HWS_DEFINER_MATCH_FLAG_TCP_O |
-                                  MLX5HWS_DEFINER_MATCH_FLAG_TCP_I);
-       if (flags & (flags - 1))
-               goto err_conflict;
-
-       return 0;
-
-err_conflict:
-       mlx5hws_err(cd->ctx, "Invalid definer fields combination\n");
-       return -EINVAL;
-}
-
-static int
-hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
-                      u32 *match_param)
-{
-       bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
-       struct mlx5hws_definer_fc *fc = cd->fc;
-       struct mlx5hws_definer_fc *curr_fc;
-       u32 *s_ipv6, *d_ipv6;
-
-       if (HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type, 0x2) ||
-           HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c2, 0xe) ||
-           HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c4, 0x4)) {
-               mlx5hws_err(cd->ctx, "Unsupported outer parameters set\n");
-               return -EINVAL;
-       }
-
-       /* L2 Check ethertype */
-       HWS_SET_HDR(fc, match_param, ETH_TYPE_O,
-                   outer_headers.ethertype,
-                   eth_l2_outer.l3_ethertype);
-       /* L2 Check SMAC 47_16 */
-       HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_O,
-                   outer_headers.smac_47_16, eth_l2_src_outer.smac_47_16);
-       /* L2 Check SMAC 15_0 */
-       HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_O,
-                   outer_headers.smac_15_0, eth_l2_src_outer.smac_15_0);
-       /* L2 Check DMAC 47_16 */
-       HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_O,
-                   outer_headers.dmac_47_16, eth_l2_outer.dmac_47_16);
-       /* L2 Check DMAC 15_0 */
-       HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_O,
-                   outer_headers.dmac_15_0, eth_l2_outer.dmac_15_0);
-
-       /* L2 VLAN */
-       HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_O,
-                   outer_headers.first_prio, eth_l2_outer.first_priority);
-       HWS_SET_HDR(fc, match_param, VLAN_CFI_O,
-                   outer_headers.first_cfi, eth_l2_outer.first_cfi);
-       HWS_SET_HDR(fc, match_param, VLAN_ID_O,
-                   outer_headers.first_vid, eth_l2_outer.first_vlan_id);
-
-       /* L2 CVLAN and SVLAN */
-       if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag) ||
-           HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag)) {
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O];
-               HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.first_vlan_qualifier);
-               curr_fc->tag_set = &hws_definer_outer_vlan_type_set;
-               curr_fc->tag_mask_set = &hws_definer_ones_set;
-       }
-
-       /* L3 Check IP header */
-       HWS_SET_HDR(fc, match_param, IP_PROTOCOL_O,
-                   outer_headers.ip_protocol,
-                   eth_l3_outer.protocol_next_header);
-       HWS_SET_HDR(fc, match_param, IP_TTL_O,
-                   outer_headers.ttl_hoplimit,
-                   eth_l3_outer.time_to_live_hop_limit);
-
-       /* L3 Check IPv4/IPv6 addresses */
-       s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
-                             outer_headers.src_ipv4_src_ipv6.ipv6_layout);
-       d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
-                             outer_headers.dst_ipv4_dst_ipv6.ipv6_layout);
-
-       /* Assume IPv6 is used if ipv6 bits are set */
-       is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
-       is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
-
-       if (is_s_ipv6) {
-               /* Handle IPv6 source address */
-               HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_O,
-                           outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
-                           ipv6_src_outer.ipv6_address_127_96);
-               HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_O,
-                           outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
-                           ipv6_src_outer.ipv6_address_95_64);
-               HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_O,
-                           outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
-                           ipv6_src_outer.ipv6_address_63_32);
-               HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_O,
-                           outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
-                           ipv6_src_outer.ipv6_address_31_0);
-       } else {
-               /* Handle IPv4 source address */
-               HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
-                           outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
-                           ipv4_src_dest_outer.source_address);
-       }
-       if (is_d_ipv6) {
-               /* Handle IPv6 destination address */
-               HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_O,
-                           outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
-                           ipv6_dst_outer.ipv6_address_127_96);
-               HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_O,
-                           outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
-                           ipv6_dst_outer.ipv6_address_95_64);
-               HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_O,
-                           outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
-                           ipv6_dst_outer.ipv6_address_63_32);
-               HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_O,
-                           outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
-                           ipv6_dst_outer.ipv6_address_31_0);
-       } else {
-               /* Handle IPv4 destination address */
-               HWS_SET_HDR(fc, match_param, IPV4_DST_O,
-                           outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
-                           ipv4_src_dest_outer.destination_address);
-       }
-
-       /* L4 Handle TCP/UDP */
-       HWS_SET_HDR(fc, match_param, L4_SPORT_O,
-                   outer_headers.tcp_sport, eth_l4_outer.source_port);
-       HWS_SET_HDR(fc, match_param, L4_DPORT_O,
-                   outer_headers.tcp_dport, eth_l4_outer.destination_port);
-       HWS_SET_HDR(fc, match_param, L4_SPORT_O,
-                   outer_headers.udp_sport, eth_l4_outer.source_port);
-       HWS_SET_HDR(fc, match_param, L4_DPORT_O,
-                   outer_headers.udp_dport, eth_l4_outer.destination_port);
-       HWS_SET_HDR(fc, match_param, TCP_FLAGS_O,
-                   outer_headers.tcp_flags, eth_l4_outer.tcp_flags);
-
-       /* L3 Handle DSCP, ECN and IHL  */
-       HWS_SET_HDR(fc, match_param, IP_DSCP_O,
-                   outer_headers.ip_dscp, eth_l3_outer.dscp);
-       HWS_SET_HDR(fc, match_param, IP_ECN_O,
-                   outer_headers.ip_ecn, eth_l3_outer.ecn);
-       HWS_SET_HDR(fc, match_param, IPV4_IHL_O,
-                   outer_headers.ipv4_ihl, eth_l3_outer.ihl);
-
-       /* Set IP fragmented bit */
-       if (HWS_IS_FLD_SET(match_param, outer_headers.frag)) {
-               smac_set = HWS_IS_FLD_SET(match_param, outer_headers.smac_15_0) ||
-                               HWS_IS_FLD_SET(match_param, outer_headers.smac_47_16);
-               dmac_set = HWS_IS_FLD_SET(match_param, outer_headers.dmac_15_0) ||
-                               HWS_IS_FLD_SET(match_param, outer_headers.dmac_47_16);
-               if (smac_set == dmac_set) {
-                       HWS_SET_HDR(fc, match_param, IP_FRAG_O,
-                                   outer_headers.frag, eth_l4_outer.ip_fragmented);
-               } else {
-                       HWS_SET_HDR(fc, match_param, IP_FRAG_O,
-                                   outer_headers.frag, eth_l2_src_outer.ip_fragmented);
-               }
-       }
-
-       /* L3_type set */
-       if (HWS_IS_FLD_SET(match_param, outer_headers.ip_version)) {
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O];
-               HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.l3_type);
-               curr_fc->tag_set = &hws_definer_l3_type_set;
-               curr_fc->tag_mask_set = &hws_definer_ones_set;
-               HWS_CALC_HDR_SRC(curr_fc, outer_headers.ip_version);
-       }
-
-       return 0;
-}
-
-static int
-hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
-                      u32 *match_param)
-{
-       bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
-       struct mlx5hws_definer_fc *fc = cd->fc;
-       struct mlx5hws_definer_fc *curr_fc;
-       u32 *s_ipv6, *d_ipv6;
-
-       if (HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type, 0x2) ||
-           HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c2, 0xe) ||
-           HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c4, 0x4)) {
-               mlx5hws_err(cd->ctx, "Unsupported inner parameters set\n");
-               return -EINVAL;
-       }
-
-       /* L2 Check ethertype */
-       HWS_SET_HDR(fc, match_param, ETH_TYPE_I,
-                   inner_headers.ethertype,
-                   eth_l2_inner.l3_ethertype);
-       /* L2 Check SMAC 47_16 */
-       HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_I,
-                   inner_headers.smac_47_16, eth_l2_src_inner.smac_47_16);
-       /* L2 Check SMAC 15_0 */
-       HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_I,
-                   inner_headers.smac_15_0, eth_l2_src_inner.smac_15_0);
-       /* L2 Check DMAC 47_16 */
-       HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_I,
-                   inner_headers.dmac_47_16, eth_l2_inner.dmac_47_16);
-       /* L2 Check DMAC 15_0 */
-       HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_I,
-                   inner_headers.dmac_15_0, eth_l2_inner.dmac_15_0);
-
-       /* L2 VLAN */
-       HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_I,
-                   inner_headers.first_prio, eth_l2_inner.first_priority);
-       HWS_SET_HDR(fc, match_param, VLAN_CFI_I,
-                   inner_headers.first_cfi, eth_l2_inner.first_cfi);
-       HWS_SET_HDR(fc, match_param, VLAN_ID_I,
-                   inner_headers.first_vid, eth_l2_inner.first_vlan_id);
-
-       /* L2 CVLAN and SVLAN */
-       if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag) ||
-           HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag)) {
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I];
-               HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.first_vlan_qualifier);
-               curr_fc->tag_set = &hws_definer_inner_vlan_type_set;
-               curr_fc->tag_mask_set = &hws_definer_ones_set;
-       }
-       /* L3 Check IP header */
-       HWS_SET_HDR(fc, match_param, IP_PROTOCOL_I,
-                   inner_headers.ip_protocol,
-                   eth_l3_inner.protocol_next_header);
-       HWS_SET_HDR(fc, match_param, IP_VERSION_I,
-                   inner_headers.ip_version,
-                   eth_l3_inner.ip_version);
-       HWS_SET_HDR(fc, match_param, IP_TTL_I,
-                   inner_headers.ttl_hoplimit,
-                   eth_l3_inner.time_to_live_hop_limit);
-
-       /* L3 Check IPv4/IPv6 addresses */
-       s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
-                             inner_headers.src_ipv4_src_ipv6.ipv6_layout);
-       d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
-                             inner_headers.dst_ipv4_dst_ipv6.ipv6_layout);
-
-       /* Assume IPv6 is used if ipv6 bits are set */
-       is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
-       is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
-
-       if (is_s_ipv6) {
-               /* Handle IPv6 source address */
-               HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_I,
-                           inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
-                           ipv6_src_inner.ipv6_address_127_96);
-               HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_I,
-                           inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
-                           ipv6_src_inner.ipv6_address_95_64);
-               HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_I,
-                           inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
-                           ipv6_src_inner.ipv6_address_63_32);
-               HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_I,
-                           inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
-                           ipv6_src_inner.ipv6_address_31_0);
-       } else {
-               /* Handle IPv4 source address */
-               HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
-                           inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
-                           ipv4_src_dest_inner.source_address);
-       }
-       if (is_d_ipv6) {
-               /* Handle IPv6 destination address */
-               HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_I,
-                           inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
-                           ipv6_dst_inner.ipv6_address_127_96);
-               HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_I,
-                           inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
-                           ipv6_dst_inner.ipv6_address_95_64);
-               HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_I,
-                           inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
-                           ipv6_dst_inner.ipv6_address_63_32);
-               HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_I,
-                           inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
-                           ipv6_dst_inner.ipv6_address_31_0);
-       } else {
-               /* Handle IPv4 destination address */
-               HWS_SET_HDR(fc, match_param, IPV4_DST_I,
-                           inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
-                           ipv4_src_dest_inner.destination_address);
-       }
-
-       /* L4 Handle TCP/UDP */
-       HWS_SET_HDR(fc, match_param, L4_SPORT_I,
-                   inner_headers.tcp_sport, eth_l4_inner.source_port);
-       HWS_SET_HDR(fc, match_param, L4_DPORT_I,
-                   inner_headers.tcp_dport, eth_l4_inner.destination_port);
-       HWS_SET_HDR(fc, match_param, L4_SPORT_I,
-                   inner_headers.udp_sport, eth_l4_inner.source_port);
-       HWS_SET_HDR(fc, match_param, L4_DPORT_I,
-                   inner_headers.udp_dport, eth_l4_inner.destination_port);
-       HWS_SET_HDR(fc, match_param, TCP_FLAGS_I,
-                   inner_headers.tcp_flags, eth_l4_inner.tcp_flags);
-
-       /* L3 Handle DSCP, ECN and IHL  */
-       HWS_SET_HDR(fc, match_param, IP_DSCP_I,
-                   inner_headers.ip_dscp, eth_l3_inner.dscp);
-       HWS_SET_HDR(fc, match_param, IP_ECN_I,
-                   inner_headers.ip_ecn, eth_l3_inner.ecn);
-       HWS_SET_HDR(fc, match_param, IPV4_IHL_I,
-                   inner_headers.ipv4_ihl, eth_l3_inner.ihl);
-
-       /* Set IP fragmented bit */
-       if (HWS_IS_FLD_SET(match_param, inner_headers.frag)) {
-               if (HWS_IS_FLD_SET(match_param, misc_parameters.vxlan_vni)) {
-                       HWS_SET_HDR(fc, match_param, IP_FRAG_I,
-                                   inner_headers.frag, eth_l2_inner.ip_fragmented);
-               } else {
-                       smac_set = HWS_IS_FLD_SET(match_param, inner_headers.smac_15_0) ||
-                                  HWS_IS_FLD_SET(match_param, inner_headers.smac_47_16);
-                       dmac_set = HWS_IS_FLD_SET(match_param, inner_headers.dmac_15_0) ||
-                                  HWS_IS_FLD_SET(match_param, inner_headers.dmac_47_16);
-                       if (smac_set == dmac_set) {
-                               HWS_SET_HDR(fc, match_param, IP_FRAG_I,
-                                           inner_headers.frag, eth_l4_inner.ip_fragmented);
-                       } else {
-                               HWS_SET_HDR(fc, match_param, IP_FRAG_I,
-                                           inner_headers.frag, eth_l2_src_inner.ip_fragmented);
-                       }
-               }
-       }
-
-       /* L3_type set */
-       if (HWS_IS_FLD_SET(match_param, inner_headers.ip_version)) {
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I];
-               HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.l3_type);
-               curr_fc->tag_set = &hws_definer_l3_type_set;
-               curr_fc->tag_mask_set = &hws_definer_ones_set;
-               HWS_CALC_HDR_SRC(curr_fc, inner_headers.ip_version);
-       }
-
-       return 0;
-}
-
-static int
-hws_definer_conv_misc(struct mlx5hws_definer_conv_data *cd,
-                     u32 *match_param)
-{
-       struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
-       struct mlx5hws_definer_fc *fc = cd->fc;
-       struct mlx5hws_definer_fc *curr_fc;
-
-       if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1, 0x1) ||
-           HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_64, 0xc) ||
-           HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_d8, 0x6) ||
-           HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_e0, 0xc) ||
-           HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_100, 0xc) ||
-           HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_120, 0xa) ||
-           HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_140, 0x8) ||
-           HWS_IS_FLD_SET(match_param, misc_parameters.bth_dst_qp) ||
-           HWS_IS_FLD_SET(match_param, misc_parameters.bth_opcode) ||
-           HWS_IS_FLD_SET(match_param, misc_parameters.inner_esp_spi) ||
-           HWS_IS_FLD_SET(match_param, misc_parameters.outer_esp_spi) ||
-           HWS_IS_FLD_SET(match_param, misc_parameters.source_vhca_port) ||
-           HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1a0, 0x60)) {
-               mlx5hws_err(cd->ctx, "Unsupported misc parameters set\n");
-               return -EINVAL;
-       }
-
-       /* Check GRE related fields */
-       if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_c_present)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_C];
-               HWS_CALC_HDR(curr_fc,
-                            misc_parameters.gre_c_present,
-                            tunnel_header.tunnel_header_0);
-               curr_fc->bit_mask = __mlx5_mask(header_gre, gre_c_present);
-               curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_c_present);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_k_present)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_K];
-               HWS_CALC_HDR(curr_fc,
-                            misc_parameters.gre_k_present,
-                            tunnel_header.tunnel_header_0);
-               curr_fc->bit_mask = __mlx5_mask(header_gre, gre_k_present);
-               curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_k_present);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_s_present)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_S];
-               HWS_CALC_HDR(curr_fc,
-                            misc_parameters.gre_s_present,
-                            tunnel_header.tunnel_header_0);
-               curr_fc->bit_mask = __mlx5_mask(header_gre, gre_s_present);
-               curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_s_present);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_protocol)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL];
-               HWS_CALC_HDR(curr_fc,
-                            misc_parameters.gre_protocol,
-                            tunnel_header.tunnel_header_0);
-               curr_fc->bit_mask = __mlx5_mask(header_gre, gre_protocol);
-               curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_key.key)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
-                                  MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY;
-               HWS_SET_HDR(fc, match_param, GRE_OPT_KEY,
-                           misc_parameters.gre_key.key, tunnel_header.tunnel_header_2);
-       }
-
-       /* Check GENEVE related fields */
-       if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_vni)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_VNI];
-               HWS_CALC_HDR(curr_fc,
-                            misc_parameters.geneve_vni,
-                            tunnel_header.tunnel_header_1);
-               curr_fc->bit_mask = __mlx5_mask(header_geneve, vni);
-               curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, vni);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_opt_len)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN];
-               HWS_CALC_HDR(curr_fc,
-                            misc_parameters.geneve_opt_len,
-                            tunnel_header.tunnel_header_0);
-               curr_fc->bit_mask = __mlx5_mask(header_geneve, opt_len);
-               curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, opt_len);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_protocol_type)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_PROTO];
-               HWS_CALC_HDR(curr_fc,
-                            misc_parameters.geneve_protocol_type,
-                            tunnel_header.tunnel_header_0);
-               curr_fc->bit_mask = __mlx5_mask(header_geneve, protocol_type);
-               curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, protocol_type);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_oam)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OAM];
-               HWS_CALC_HDR(curr_fc,
-                            misc_parameters.geneve_oam,
-                            tunnel_header.tunnel_header_0);
-               curr_fc->bit_mask = __mlx5_mask(header_geneve, o_flag);
-               curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, o_flag);
-       }
-
-       HWS_SET_HDR(fc, match_param, SOURCE_QP,
-                   misc_parameters.source_sqn, source_qp_gvmi.source_qp);
-       HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_O,
-                   misc_parameters.outer_ipv6_flow_label, eth_l3_outer.flow_label);
-       HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_I,
-                   misc_parameters.inner_ipv6_flow_label, eth_l3_inner.flow_label);
-
-       /* L2 Second VLAN */
-       HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_O,
-                   misc_parameters.outer_second_prio, eth_l2_outer.second_priority);
-       HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_I,
-                   misc_parameters.inner_second_prio, eth_l2_inner.second_priority);
-       HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_O,
-                   misc_parameters.outer_second_cfi, eth_l2_outer.second_cfi);
-       HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_I,
-                   misc_parameters.inner_second_cfi, eth_l2_inner.second_cfi);
-       HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_O,
-                   misc_parameters.outer_second_vid, eth_l2_outer.second_vlan_id);
-       HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_I,
-                   misc_parameters.inner_second_vid, eth_l2_inner.second_vlan_id);
-
-       /* L2 Second CVLAN and SVLAN */
-       if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag) ||
-           HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag)) {
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O];
-               HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.second_vlan_qualifier);
-               curr_fc->tag_set = &hws_definer_outer_second_vlan_type_set;
-               curr_fc->tag_mask_set = &hws_definer_ones_set;
-       }
-
-       if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) ||
-           HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag)) {
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I];
-               HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.second_vlan_qualifier);
-               curr_fc->tag_set = &hws_definer_inner_second_vlan_type_set;
-               curr_fc->tag_mask_set = &hws_definer_ones_set;
-       }
-
-       /* VXLAN VNI  */
-       if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.vxlan_vni)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN;
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_VNI];
-               HWS_CALC_HDR(curr_fc, misc_parameters.vxlan_vni, tunnel_header.tunnel_header_1);
-               curr_fc->bit_mask = __mlx5_mask(header_vxlan, vni);
-               curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan, vni);
-       }
-
-       /* Flex protocol steering ok bits */
-       if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.geneve_tlv_option_0_exist)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
-
-               if (!caps->flex_parser_ok_bits_supp) {
-                       mlx5hws_err(cd->ctx, "Unsupported flex_parser_ok_bits_supp capability\n");
-                       return -EOPNOTSUPP;
-               }
-
-               curr_fc = hws_definer_flex_parser_steering_ok_bits_handler(
-                               cd, caps->flex_parser_id_geneve_tlv_option_0);
-               if (!curr_fc)
-                       return -EINVAL;
-
-               HWS_CALC_HDR_SRC(fc, misc_parameters.geneve_tlv_option_0_exist);
-       }
-
-       if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port)) {
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_SOURCE_GVMI];
-               HWS_CALC_HDR_DST(curr_fc, source_qp_gvmi.source_gvmi);
-               curr_fc->tag_mask_set = &hws_definer_ones_set;
-               curr_fc->tag_set = HWS_IS_FLD_SET(match_param,
-                                                 misc_parameters.source_eswitch_owner_vhca_id) ?
-                                                 &hws_definer_set_source_gvmi_vhca_id :
-                                                 &hws_definer_set_source_gvmi;
-       } else {
-               if (HWS_IS_FLD_SET(match_param, misc_parameters.source_eswitch_owner_vhca_id)) {
-                       mlx5hws_err(cd->ctx,
-                                   "Unsupported source_eswitch_owner_vhca_id field usage\n");
-                       return -EOPNOTSUPP;
-               }
-       }
-
-       return 0;
-}
-
-static int
-hws_definer_conv_misc2(struct mlx5hws_definer_conv_data *cd,
-                      u32 *match_param)
-{
-       struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
-       struct mlx5hws_definer_fc *fc = cd->fc;
-       struct mlx5hws_definer_fc *curr_fc;
-
-       if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1a0, 0x8) ||
-           HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1b8, 0x8) ||
-           HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1c0, 0x40) ||
-           HWS_IS_FLD_SET(match_param, misc_parameters_2.macsec_syndrome) ||
-           HWS_IS_FLD_SET(match_param, misc_parameters_2.ipsec_syndrome)) {
-               mlx5hws_err(cd->ctx, "Unsupported misc2 parameters set\n");
-               return -EINVAL;
-       }
-
-       HWS_SET_HDR(fc, match_param, MPLS0_O,
-                   misc_parameters_2.outer_first_mpls, mpls_outer.mpls0_label);
-       HWS_SET_HDR(fc, match_param, MPLS0_I,
-                   misc_parameters_2.inner_first_mpls, mpls_inner.mpls0_label);
-       HWS_SET_HDR(fc, match_param, REG_0,
-                   misc_parameters_2.metadata_reg_c_0, registers.register_c_0);
-       HWS_SET_HDR(fc, match_param, REG_1,
-                   misc_parameters_2.metadata_reg_c_1, registers.register_c_1);
-       HWS_SET_HDR(fc, match_param, REG_2,
-                   misc_parameters_2.metadata_reg_c_2, registers.register_c_2);
-       HWS_SET_HDR(fc, match_param, REG_3,
-                   misc_parameters_2.metadata_reg_c_3, registers.register_c_3);
-       HWS_SET_HDR(fc, match_param, REG_4,
-                   misc_parameters_2.metadata_reg_c_4, registers.register_c_4);
-       HWS_SET_HDR(fc, match_param, REG_5,
-                   misc_parameters_2.metadata_reg_c_5, registers.register_c_5);
-       HWS_SET_HDR(fc, match_param, REG_6,
-                   misc_parameters_2.metadata_reg_c_6, registers.register_c_6);
-       HWS_SET_HDR(fc, match_param, REG_7,
-                   misc_parameters_2.metadata_reg_c_7, registers.register_c_7);
-       HWS_SET_HDR(fc, match_param, REG_A,
-                   misc_parameters_2.metadata_reg_a, metadata.general_purpose);
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_gre)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE;
-
-               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)) {
-                       mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over gre parameters set\n");
-                       return -EOPNOTSUPP;
-               }
-
-               curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_gre);
-               if (!curr_fc)
-                       return -EINVAL;
-
-               HWS_CALC_HDR_SRC(fc, misc_parameters_2.outer_first_mpls_over_gre);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_udp)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP;
-
-               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)) {
-                       mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over udp parameters set\n");
-                       return -EOPNOTSUPP;
-               }
-
-               curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_udp);
-               if (!curr_fc)
-                       return -EINVAL;
-
-               HWS_CALC_HDR_SRC(fc, misc_parameters_2.outer_first_mpls_over_udp);
-       }
-
-       return 0;
-}
-
-static int
-hws_definer_conv_misc3(struct mlx5hws_definer_conv_data *cd, u32 *match_param)
-{
-       struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
-       struct mlx5hws_definer_fc *fc = cd->fc;
-       struct mlx5hws_definer_fc *curr_fc;
-       bool vxlan_gpe_flex_parser_enabled;
-
-       /* Check reserved and unsupported fields */
-       if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_80, 0x8) ||
-           HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_b0, 0x10) ||
-           HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_170, 0x10) ||
-           HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_1e0, 0x20)) {
-               mlx5hws_err(cd->ctx, "Unsupported misc3 parameters set\n");
-               return -EINVAL;
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_seq_num) ||
-           HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_ack_num)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_I;
-               HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
-                           misc_parameters_3.inner_tcp_seq_num, tcp_icmp.tcp_seq);
-               HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
-                           misc_parameters_3.inner_tcp_ack_num, tcp_icmp.tcp_ack);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_seq_num) ||
-           HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_ack_num)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_O;
-               HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
-                           misc_parameters_3.outer_tcp_seq_num, tcp_icmp.tcp_seq);
-               HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
-                           misc_parameters_3.outer_tcp_ack_num, tcp_icmp.tcp_ack);
-       }
-
-       vxlan_gpe_flex_parser_enabled = caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_vni)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
-
-               if (!vxlan_gpe_flex_parser_enabled) {
-                       mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
-                       return -EOPNOTSUPP;
-               }
-
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI];
-               HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_vni,
-                            tunnel_header.tunnel_header_1);
-               curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, vni);
-               curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, vni);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_next_protocol)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
-
-               if (!vxlan_gpe_flex_parser_enabled) {
-                       mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
-                       return -EOPNOTSUPP;
-               }
-
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO];
-               HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_next_protocol,
-                            tunnel_header.tunnel_header_0);
-               curr_fc->byte_off += MLX5_BYTE_OFF(header_vxlan_gpe, protocol);
-               curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, protocol);
-               curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, protocol);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_flags)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
-
-               if (!vxlan_gpe_flex_parser_enabled) {
-                       mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
-                       return -EOPNOTSUPP;
-               }
-
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS];
-               HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_flags,
-                            tunnel_header.tunnel_header_0);
-               curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, flags);
-               curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, flags);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_header_data) ||
-           HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
-           HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4;
-
-               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED)) {
-                       mlx5hws_err(cd->ctx, "Unsupported ICMPv4 flex parser\n");
-                       return -EOPNOTSUPP;
-               }
-
-               HWS_SET_HDR(fc, match_param, ICMP_DW3,
-                           misc_parameters_3.icmp_header_data, tcp_icmp.icmp_dw3);
-
-               if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
-                   HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
-                       curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
-                       HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
-                       curr_fc->tag_set = &hws_definer_icmp_dw1_set;
-               }
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_header_data) ||
-           HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
-           HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6;
-
-               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED)) {
-                       mlx5hws_err(cd->ctx, "Unsupported ICMPv6 parser\n");
-                       return -EOPNOTSUPP;
-               }
-
-               HWS_SET_HDR(fc, match_param, ICMP_DW3,
-                           misc_parameters_3.icmpv6_header_data, tcp_icmp.icmp_dw3);
-
-               if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
-                   HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
-                       curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
-                       HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
-                       curr_fc->tag_set = &hws_definer_icmpv6_dw1_set;
-               }
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.geneve_tlv_option_0_data)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
-
-               curr_fc =
-                       hws_definer_flex_parser_handler(cd,
-                                                       caps->flex_parser_id_geneve_tlv_option_0);
-               if (!curr_fc)
-                       return -EINVAL;
-
-               HWS_CALC_HDR_SRC(fc, misc_parameters_3.geneve_tlv_option_0_data);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_teid)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
-
-               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)) {
-                       mlx5hws_err(cd->ctx, "Unsupported GTPU TEID flex parser\n");
-                       return -EOPNOTSUPP;
-               }
-
-               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_TEID];
-               fc->tag_set = &hws_definer_generic_set;
-               fc->bit_mask = __mlx5_mask(header_gtp, teid);
-               fc->byte_off = caps->format_select_gtpu_dw_1 * DW_SIZE;
-               HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_teid);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_type)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
-
-               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
-                       mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
-                       return -EOPNOTSUPP;
-               }
-
-               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
-               fc->tag_set = &hws_definer_generic_set;
-               fc->bit_mask = __mlx5_mask(header_gtp, msg_type);
-               fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_type);
-               fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
-               HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_msg_type);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_flags)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
-
-               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
-                       mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
-                       return -EOPNOTSUPP;
-               }
-
-               fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
-               fc->tag_set = &hws_definer_generic_set;
-               fc->bit_mask = __mlx5_mask(header_gtp, msg_flags);
-               fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_flags);
-               fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
-               HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_msg_flags);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_2)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
-
-               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)) {
-                       mlx5hws_err(cd->ctx, "Unsupported GTPU DW2 flex parser\n");
-                       return -EOPNOTSUPP;
-               }
-
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW2];
-               curr_fc->tag_set = &hws_definer_generic_set;
-               curr_fc->bit_mask = -1;
-               curr_fc->byte_off = caps->format_select_gtpu_dw_2 * DW_SIZE;
-               HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_dw_2);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_first_ext_dw_0)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
-
-               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)) {
-                       mlx5hws_err(cd->ctx, "Unsupported GTPU first EXT DW0 flex parser\n");
-                       return -EOPNOTSUPP;
-               }
-
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0];
-               curr_fc->tag_set = &hws_definer_generic_set;
-               curr_fc->bit_mask = -1;
-               curr_fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
-               HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_first_ext_dw_0);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_0)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
-
-               if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)) {
-                       mlx5hws_err(cd->ctx, "Unsupported GTPU DW0 flex parser\n");
-                       return -EOPNOTSUPP;
-               }
-
-               curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW0];
-               curr_fc->tag_set = &hws_definer_generic_set;
-               curr_fc->bit_mask = -1;
-               curr_fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
-               HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_dw_0);
-       }
-
-       return 0;
-}
-
-static int
-hws_definer_conv_misc4(struct mlx5hws_definer_conv_data *cd,
-                      u32 *match_param)
-{
-       bool parser_is_used[HWS_NUM_OF_FLEX_PARSERS] = {};
-       struct mlx5hws_definer_fc *fc;
-       u32 id, value;
-
-       if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_4.reserved_at_100, 0x100)) {
-               mlx5hws_err(cd->ctx, "Unsupported misc4 parameters set\n");
-               return -EINVAL;
-       }
-
-       id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_0);
-       value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_0);
-       fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
-       if (!fc)
-               return -EINVAL;
-
-       HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_0);
-
-       id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_1);
-       value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_1);
-       fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
-       if (!fc)
-               return -EINVAL;
-
-       HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_1);
-
-       id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_2);
-       value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_2);
-       fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
-       if (!fc)
-               return -EINVAL;
-
-       HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_2);
-
-       id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_3);
-       value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_3);
-       fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
-       if (!fc)
-               return -EINVAL;
-
-       HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_3);
-
-       return 0;
-}
-
-static int
-hws_definer_conv_misc5(struct mlx5hws_definer_conv_data *cd,
-                      u32 *match_param)
-{
-       struct mlx5hws_definer_fc *fc = cd->fc;
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_0) ||
-           HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_1) ||
-           HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_2) ||
-           HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_3) ||
-           HWS_IS_FLD_SET_SZ(match_param, misc_parameters_5.reserved_at_100, 0x100)) {
-               mlx5hws_err(cd->ctx, "Unsupported misc5 parameters set\n");
-               return -EINVAL;
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_0)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
-               HWS_SET_HDR(fc, match_param, TNL_HDR_0,
-                           misc_parameters_5.tunnel_header_0, tunnel_header.tunnel_header_0);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_1)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
-               HWS_SET_HDR(fc, match_param, TNL_HDR_1,
-                           misc_parameters_5.tunnel_header_1, tunnel_header.tunnel_header_1);
-       }
-
-       if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_2)) {
-               cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2;
-               HWS_SET_HDR(fc, match_param, TNL_HDR_2,
-                           misc_parameters_5.tunnel_header_2, tunnel_header.tunnel_header_2);
-       }
-
-       HWS_SET_HDR(fc, match_param, TNL_HDR_3,
-                   misc_parameters_5.tunnel_header_3, tunnel_header.tunnel_header_3);
-
-       return 0;
-}
-
-static int hws_definer_get_fc_size(struct mlx5hws_definer_fc *fc)
-{
-       u32 fc_sz = 0;
-       int i;
-
-       /* For empty matcher, ZERO_SIZE_PTR is returned */
-       if (fc == ZERO_SIZE_PTR)
-               return 0;
-
-       for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++)
-               if (fc[i].tag_set)
-                       fc_sz++;
-       return fc_sz;
-}
-
-static struct mlx5hws_definer_fc *
-hws_definer_alloc_compressed_fc(struct mlx5hws_definer_fc *fc)
-{
-       struct mlx5hws_definer_fc *compressed_fc = NULL;
-       u32 definer_size = hws_definer_get_fc_size(fc);
-       u32 fc_sz = 0;
-       int i;
-
-       compressed_fc = kcalloc(definer_size, sizeof(*compressed_fc), GFP_KERNEL);
-       if (!compressed_fc)
-               return NULL;
-
-       /* For empty matcher, ZERO_SIZE_PTR is returned */
-       if (!definer_size)
-               return compressed_fc;
-
-       for (i = 0, fc_sz = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) {
-               if (!fc[i].tag_set)
-                       continue;
-
-               fc[i].fname = i;
-               memcpy(&compressed_fc[fc_sz++], &fc[i], sizeof(*compressed_fc));
-       }
-
-       return compressed_fc;
-}
-
-static void
-hws_definer_set_hl(u8 *hl, struct mlx5hws_definer_fc *fc)
-{
-       int i;
-
-       /* nothing to do for empty matcher */
-       if (fc == ZERO_SIZE_PTR)
-               return;
-
-       for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) {
-               if (!fc[i].tag_set)
-                       continue;
-
-               HWS_SET32(hl, -1, fc[i].byte_off, fc[i].bit_off, fc[i].bit_mask);
-       }
-}
-
-static struct mlx5hws_definer_fc *
-hws_definer_alloc_fc(struct mlx5hws_context *ctx,
-                    size_t len)
-{
-       struct mlx5hws_definer_fc *fc;
-       int i;
-
-       fc = kcalloc(len, sizeof(*fc), GFP_KERNEL);
-       if (!fc)
-               return NULL;
-
-       for (i = 0; i < len; i++)
-               fc[i].ctx = ctx;
-
-       return fc;
-}
-
-static int
-hws_definer_conv_match_params_to_hl(struct mlx5hws_context *ctx,
-                                   struct mlx5hws_match_template *mt,
-                                   u8 *hl)
-{
-       struct mlx5hws_definer_conv_data cd = {0};
-       struct mlx5hws_definer_fc *fc;
-       int ret;
-
-       fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
-       if (!fc)
-               return -ENOMEM;
-
-       cd.fc = fc;
-       cd.ctx = ctx;
-
-       if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6) {
-               mlx5hws_err(ctx, "Unsupported match_criteria_enable provided\n");
-               ret = -EOPNOTSUPP;
-               goto err_free_fc;
-       }
-
-       if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
-               ret = hws_definer_conv_outer(&cd, mt->match_param);
-               if (ret)
-                       goto err_free_fc;
-       }
-
-       if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
-               ret = hws_definer_conv_inner(&cd, mt->match_param);
-               if (ret)
-                       goto err_free_fc;
-       }
-
-       if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
-               ret = hws_definer_conv_misc(&cd, mt->match_param);
-               if (ret)
-                       goto err_free_fc;
-       }
-
-       if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
-               ret = hws_definer_conv_misc2(&cd, mt->match_param);
-               if (ret)
-                       goto err_free_fc;
-       }
-
-       if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
-               ret = hws_definer_conv_misc3(&cd, mt->match_param);
-               if (ret)
-                       goto err_free_fc;
-       }
-
-       if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
-               ret = hws_definer_conv_misc4(&cd, mt->match_param);
-               if (ret)
-                       goto err_free_fc;
-       }
-
-       if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
-               ret = hws_definer_conv_misc5(&cd, mt->match_param);
-               if (ret)
-                       goto err_free_fc;
-       }
-
-       /* Check there is no conflicted fields set together */
-       ret = hws_definer_check_match_flags(&cd);
-       if (ret)
-               goto err_free_fc;
-
-       /* Allocate fc array on mt */
-       mt->fc = hws_definer_alloc_compressed_fc(fc);
-       if (!mt->fc) {
-               mlx5hws_err(ctx,
-                           "Convert match params: failed to set field copy to match template\n");
-               ret = -ENOMEM;
-               goto err_free_fc;
-       }
-       mt->fc_sz = hws_definer_get_fc_size(fc);
-
-       /* Fill in headers layout */
-       hws_definer_set_hl(hl, fc);
-
-       kfree(fc);
-       return 0;
-
-err_free_fc:
-       kfree(fc);
-       return ret;
-}
-
-struct mlx5hws_definer_fc *
-mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
-                                                  u8 match_criteria_enable,
-                                                  u32 *match_param,
-                                                  int *fc_sz)
-{
-       struct mlx5hws_definer_fc *compressed_fc = NULL;
-       struct mlx5hws_definer_conv_data cd = {0};
-       struct mlx5hws_definer_fc *fc;
-       int ret;
-
-       fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
-       if (!fc)
-               return NULL;
-
-       cd.fc = fc;
-       cd.ctx = ctx;
-
-       if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
-               ret = hws_definer_conv_outer(&cd, match_param);
-               if (ret)
-                       goto err_free_fc;
-       }
-
-       if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
-               ret = hws_definer_conv_inner(&cd, match_param);
-               if (ret)
-                       goto err_free_fc;
-       }
-
-       if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
-               ret = hws_definer_conv_misc(&cd, match_param);
-               if (ret)
-                       goto err_free_fc;
-       }
-
-       if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
-               ret = hws_definer_conv_misc2(&cd, match_param);
-               if (ret)
-                       goto err_free_fc;
-       }
-
-       if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
-               ret = hws_definer_conv_misc3(&cd, match_param);
-               if (ret)
-                       goto err_free_fc;
-       }
-
-       if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
-               ret = hws_definer_conv_misc4(&cd, match_param);
-               if (ret)
-                       goto err_free_fc;
-       }
-
-       if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
-               ret = hws_definer_conv_misc5(&cd, match_param);
-               if (ret)
-                       goto err_free_fc;
-       }
-
-       /* Allocate fc array on mt */
-       compressed_fc = hws_definer_alloc_compressed_fc(fc);
-       if (!compressed_fc) {
-               mlx5hws_err(ctx,
-                           "Convert to compressed fc: failed to set field copy to match template\n");
-               goto err_free_fc;
-       }
-       *fc_sz = hws_definer_get_fc_size(fc);
-
-err_free_fc:
-       kfree(fc);
-       return compressed_fc;
-}
-
-static int
-hws_definer_find_byte_in_tag(struct mlx5hws_definer *definer,
-                            u32 hl_byte_off,
-                            u32 *tag_byte_off)
-{
-       int i, dw_to_scan;
-       u8 byte_offset;
-
-       /* Avoid accessing unused DW selectors */
-       dw_to_scan = mlx5hws_definer_is_jumbo(definer) ?
-               DW_SELECTORS : DW_SELECTORS_MATCH;
-
-       /* Add offset since each DW covers multiple BYTEs */
-       byte_offset = hl_byte_off % DW_SIZE;
-       for (i = 0; i < dw_to_scan; i++) {
-               if (definer->dw_selector[i] == hl_byte_off / DW_SIZE) {
-                       *tag_byte_off = byte_offset + DW_SIZE * (DW_SELECTORS - i - 1);
-                       return 0;
-               }
-       }
-
-       /* Add offset to skip DWs in definer */
-       byte_offset = DW_SIZE * DW_SELECTORS;
-       /* Iterate in reverse since the code uses bytes from 7 -> 0 */
-       for (i = BYTE_SELECTORS; i-- > 0 ;) {
-               if (definer->byte_selector[i] == hl_byte_off) {
-                       *tag_byte_off = byte_offset + (BYTE_SELECTORS - i - 1);
-                       return 0;
-               }
-       }
-
-       return -EINVAL;
-}
-
-static int
-hws_definer_fc_bind(struct mlx5hws_definer *definer,
-                   struct mlx5hws_definer_fc *fc,
-                   u32 fc_sz)
-{
-       u32 tag_offset = 0;
-       int ret, byte_diff;
-       u32 i;
-
-       for (i = 0; i < fc_sz; i++) {
-               /* Map header layout byte offset to byte offset in tag */
-               ret = hws_definer_find_byte_in_tag(definer, fc->byte_off, &tag_offset);
-               if (ret)
-                       return ret;
-
-               /* Move setter based on the location in the definer */
-               byte_diff = fc->byte_off % DW_SIZE - tag_offset % DW_SIZE;
-               fc->bit_off = fc->bit_off + byte_diff * BITS_IN_BYTE;
-
-               /* Update offset in headers layout to offset in tag */
-               fc->byte_off = tag_offset;
-               fc++;
-       }
-
-       return 0;
-}
-
-static bool
-hws_definer_best_hl_fit_recu(struct mlx5hws_definer_sel_ctrl *ctrl,
-                            u32 cur_dw,
-                            u32 *data)
-{
-       u8 bytes_set;
-       int byte_idx;
-       bool ret;
-       int i;
-
-       /* Reached end, nothing left to do */
-       if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
-               return true;
-
-       /* No data set, can skip to next DW */
-       while (!*data) {
-               cur_dw++;
-               data++;
-
-               /* Reached end, nothing left to do */
-               if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
-                       return true;
-       }
-
-       /* Used all DW selectors and Byte selectors, no possible solution */
-       if (ctrl->allowed_full_dw == ctrl->used_full_dw &&
-           ctrl->allowed_lim_dw == ctrl->used_lim_dw &&
-           ctrl->allowed_bytes == ctrl->used_bytes)
-               return false;
-
-       /* Try to use limited DW selectors */
-       if (ctrl->allowed_lim_dw > ctrl->used_lim_dw && cur_dw < 64) {
-               ctrl->lim_dw_selector[ctrl->used_lim_dw++] = cur_dw;
-
-               ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
-               if (ret)
-                       return ret;
-
-               ctrl->lim_dw_selector[--ctrl->used_lim_dw] = 0;
-       }
-
-       /* Try to use DW selectors */
-       if (ctrl->allowed_full_dw > ctrl->used_full_dw) {
-               ctrl->full_dw_selector[ctrl->used_full_dw++] = cur_dw;
-
-               ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
-               if (ret)
-                       return ret;
-
-               ctrl->full_dw_selector[--ctrl->used_full_dw] = 0;
-       }
-
-       /* No byte selector for offset bigger than 255 */
-       if (cur_dw * DW_SIZE > 255)
-               return false;
-
-       bytes_set = !!(0x000000ff & *data) +
-                   !!(0x0000ff00 & *data) +
-                   !!(0x00ff0000 & *data) +
-                   !!(0xff000000 & *data);
-
-       /* Check if there are enough byte selectors left */
-       if (bytes_set + ctrl->used_bytes > ctrl->allowed_bytes)
-               return false;
-
-       /* Try to use Byte selectors */
-       for (i = 0; i < DW_SIZE; i++)
-               if ((0xff000000 >> (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
-                       /* Use byte selectors high to low */
-                       byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
-                       ctrl->byte_selector[byte_idx] = cur_dw * DW_SIZE + i;
-                       ctrl->used_bytes++;
-               }
-
-       ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
-       if (ret)
-               return ret;
-
-       for (i = 0; i < DW_SIZE; i++)
-               if ((0xff << (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
-                       ctrl->used_bytes--;
-                       byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
-                       ctrl->byte_selector[byte_idx] = 0;
-               }
-
-       return false;
-}
-
-static void
-hws_definer_copy_sel_ctrl(struct mlx5hws_definer_sel_ctrl *ctrl,
-                         struct mlx5hws_definer *definer)
-{
-       memcpy(definer->byte_selector, ctrl->byte_selector, ctrl->allowed_bytes);
-       memcpy(definer->dw_selector, ctrl->full_dw_selector, ctrl->allowed_full_dw);
-       memcpy(definer->dw_selector + ctrl->allowed_full_dw,
-              ctrl->lim_dw_selector, ctrl->allowed_lim_dw);
-}
-
-static int
-hws_definer_find_best_match_fit(struct mlx5hws_context *ctx,
-                               struct mlx5hws_definer *definer,
-                               u8 *hl)
-{
-       struct mlx5hws_definer_sel_ctrl ctrl = {0};
-       bool found;
-
-       /* Try to create a match definer */
-       ctrl.allowed_full_dw = DW_SELECTORS_MATCH;
-       ctrl.allowed_lim_dw = 0;
-       ctrl.allowed_bytes = BYTE_SELECTORS;
-
-       found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
-       if (found) {
-               hws_definer_copy_sel_ctrl(&ctrl, definer);
-               definer->type = MLX5HWS_DEFINER_TYPE_MATCH;
-               return 0;
-       }
-
-       /* Try to create a full/limited jumbo definer */
-       ctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS :
-                                                                 DW_SELECTORS_MATCH;
-       ctrl.allowed_lim_dw = ctx->caps->full_dw_jumbo_support ? 0 :
-                                                                DW_SELECTORS_LIMITED;
-       ctrl.allowed_bytes = BYTE_SELECTORS;
-
-       found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
-       if (found) {
-               hws_definer_copy_sel_ctrl(&ctrl, definer);
-               definer->type = MLX5HWS_DEFINER_TYPE_JUMBO;
-               return 0;
-       }
-
-       return -E2BIG;
-}
-
-static void
-hws_definer_create_tag_mask(u32 *match_param,
-                           struct mlx5hws_definer_fc *fc,
-                           u32 fc_sz,
-                           u8 *tag)
-{
-       u32 i;
-
-       for (i = 0; i < fc_sz; i++) {
-               if (fc->tag_mask_set)
-                       fc->tag_mask_set(fc, match_param, tag);
-               else
-                       fc->tag_set(fc, match_param, tag);
-               fc++;
-       }
-}
-
-void mlx5hws_definer_create_tag(u32 *match_param,
-                               struct mlx5hws_definer_fc *fc,
-                               u32 fc_sz,
-                               u8 *tag)
-{
-       u32 i;
-
-       for (i = 0; i < fc_sz; i++) {
-               fc->tag_set(fc, match_param, tag);
-               fc++;
-       }
-}
-
-int mlx5hws_definer_get_id(struct mlx5hws_definer *definer)
-{
-       return definer->obj_id;
-}
-
-int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
-                           struct mlx5hws_definer *definer_b)
-{
-       int i;
-
-       /* Future: Optimize by comparing selectors with valid mask only */
-       for (i = 0; i < BYTE_SELECTORS; i++)
-               if (definer_a->byte_selector[i] != definer_b->byte_selector[i])
-                       return 1;
-
-       for (i = 0; i < DW_SELECTORS; i++)
-               if (definer_a->dw_selector[i] != definer_b->dw_selector[i])
-                       return 1;
-
-       for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++)
-               if (definer_a->mask.jumbo[i] != definer_b->mask.jumbo[i])
-                       return 1;
-
-       return 0;
-}
-
-int
-mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
-                           struct mlx5hws_match_template *mt,
-                           struct mlx5hws_definer *match_definer)
-{
-       u8 *match_hl;
-       int ret;
-
-       /* Union header-layout (hl) is used for creating a single definer
-        * field layout used with different bitmasks for hash and match.
-        */
-       match_hl = kzalloc(MLX5_ST_SZ_BYTES(definer_hl), GFP_KERNEL);
-       if (!match_hl)
-               return -ENOMEM;
-
-       /* Convert all mt items to header layout (hl)
-        * and allocate the match and range field copy array (fc & fcr).
-        */
-       ret = hws_definer_conv_match_params_to_hl(ctx, mt, match_hl);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to convert items to header layout\n");
-               goto free_match_hl;
-       }
-
-       /* Find the match definer layout for header layout match union */
-       ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl);
-       if (ret) {
-               if (ret == -E2BIG)
-                       mlx5hws_dbg(ctx,
-                                   "Failed to create match definer from header layout - E2BIG\n");
-               else
-                       mlx5hws_err(ctx,
-                                   "Failed to create match definer from header layout (%d)\n",
-                                   ret);
-               goto free_fc;
-       }
-
-       kfree(match_hl);
-       return 0;
-
-free_fc:
-       kfree(mt->fc);
-free_match_hl:
-       kfree(match_hl);
-       return ret;
-}
-
-int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache)
-{
-       struct mlx5hws_definer_cache *new_cache;
-
-       new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL);
-       if (!new_cache)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&new_cache->list_head);
-       *cache = new_cache;
-
-       return 0;
-}
-
-void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache)
-{
-       kfree(cache);
-}
-
-int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
-                           struct mlx5hws_definer *definer)
-{
-       struct mlx5hws_definer_cache *cache = ctx->definer_cache;
-       struct mlx5hws_cmd_definer_create_attr def_attr = {0};
-       struct mlx5hws_definer_cache_item *cached_definer;
-       u32 obj_id;
-       int ret;
-
-       /* Search definer cache for requested definer */
-       list_for_each_entry(cached_definer, &cache->list_head, list_node) {
-               if (mlx5hws_definer_compare(&cached_definer->definer, definer))
-                       continue;
-
-               /* Reuse definer and set LRU (move to be first in the list) */
-               list_del_init(&cached_definer->list_node);
-               list_add(&cached_definer->list_node, &cache->list_head);
-               cached_definer->refcount++;
-               return cached_definer->definer.obj_id;
-       }
-
-       /* Allocate and create definer based on the bitmask tag */
-       def_attr.match_mask = definer->mask.jumbo;
-       def_attr.dw_selector = definer->dw_selector;
-       def_attr.byte_selector = definer->byte_selector;
-
-       ret = mlx5hws_cmd_definer_create(ctx->mdev, &def_attr, &obj_id);
-       if (ret)
-               return -1;
-
-       cached_definer = kzalloc(sizeof(*cached_definer), GFP_KERNEL);
-       if (!cached_definer)
-               goto free_definer_obj;
-
-       memcpy(&cached_definer->definer, definer, sizeof(*definer));
-       cached_definer->definer.obj_id = obj_id;
-       cached_definer->refcount = 1;
-       list_add(&cached_definer->list_node, &cache->list_head);
-
-       return obj_id;
-
-free_definer_obj:
-       mlx5hws_cmd_definer_destroy(ctx->mdev, obj_id);
-       return -1;
-}
-
-static void
-hws_definer_put_obj(struct mlx5hws_context *ctx, u32 obj_id)
-{
-       struct mlx5hws_definer_cache_item *cached_definer;
-
-       list_for_each_entry(cached_definer, &ctx->definer_cache->list_head, list_node) {
-               if (cached_definer->definer.obj_id != obj_id)
-                       continue;
-
-               /* Object found */
-               if (--cached_definer->refcount)
-                       return;
-
-               list_del_init(&cached_definer->list_node);
-               mlx5hws_cmd_definer_destroy(ctx->mdev, cached_definer->definer.obj_id);
-               kfree(cached_definer);
-               return;
-       }
-
-       /* Programming error, object must be part of cache */
-       pr_warn("HWS: failed putting definer object\n");
-}
-
-static struct mlx5hws_definer *
-hws_definer_alloc(struct mlx5hws_context *ctx,
-                 struct mlx5hws_definer_fc *fc,
-                 int fc_sz,
-                 u32 *match_param,
-                 struct mlx5hws_definer *layout,
-                 bool bind_fc)
-{
-       struct mlx5hws_definer *definer;
-       int ret;
-
-       definer = kmemdup(layout, sizeof(*definer), GFP_KERNEL);
-       if (!definer)
-               return NULL;
-
-       /* Align field copy array based on given layout */
-       if (bind_fc) {
-               ret = hws_definer_fc_bind(definer, fc, fc_sz);
-               if (ret) {
-                       mlx5hws_err(ctx, "Failed to bind field copy to definer\n");
-                       goto free_definer;
-               }
-       }
-
-       /* Create the tag mask used for definer creation */
-       hws_definer_create_tag_mask(match_param, fc, fc_sz, definer->mask.jumbo);
-
-       ret = mlx5hws_definer_get_obj(ctx, definer);
-       if (ret < 0)
-               goto free_definer;
-
-       definer->obj_id = ret;
-       return definer;
-
-free_definer:
-       kfree(definer);
-       return NULL;
-}
-
-void mlx5hws_definer_free(struct mlx5hws_context *ctx,
-                         struct mlx5hws_definer *definer)
-{
-       hws_definer_put_obj(ctx, definer->obj_id);
-       kfree(definer);
-}
-
-static int
-hws_definer_mt_match_init(struct mlx5hws_context *ctx,
-                         struct mlx5hws_match_template *mt,
-                         struct mlx5hws_definer *match_layout)
-{
-       /* Create mandatory match definer */
-       mt->definer = hws_definer_alloc(ctx,
-                                       mt->fc,
-                                       mt->fc_sz,
-                                       mt->match_param,
-                                       match_layout,
-                                       true);
-       if (!mt->definer) {
-               mlx5hws_err(ctx, "Failed to create match definer\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static void
-hws_definer_mt_match_uninit(struct mlx5hws_context *ctx,
-                           struct mlx5hws_match_template *mt)
-{
-       mlx5hws_definer_free(ctx, mt->definer);
-}
-
-int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
-                           struct mlx5hws_match_template *mt)
-{
-       struct mlx5hws_definer match_layout = {0};
-       int ret;
-
-       ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
-               return ret;
-       }
-
-       /* Calculate definers needed for exact match */
-       ret = hws_definer_mt_match_init(ctx, mt, &match_layout);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to init match definers\n");
-               goto free_fc;
-       }
-
-       return 0;
-
-free_fc:
-       kfree(mt->fc);
-       return ret;
-}
-
-void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx,
-                              struct mlx5hws_match_template *mt)
-{
-       hws_definer_mt_match_uninit(ctx, mt);
-       kfree(mt->fc);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h
deleted file mode 100644 (file)
index 2f6a7df..0000000
+++ /dev/null
@@ -1,834 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#ifndef MLX5HWS_DEFINER_H_
-#define MLX5HWS_DEFINER_H_
-
-/* Max available selecotrs */
-#define DW_SELECTORS 9
-#define BYTE_SELECTORS 8
-
-/* Selectors based on match TAG */
-#define DW_SELECTORS_MATCH 6
-#define DW_SELECTORS_LIMITED 3
-
-/* Selectors based on range TAG */
-#define DW_SELECTORS_RANGE 2
-#define BYTE_SELECTORS_RANGE 8
-
-#define HWS_NUM_OF_FLEX_PARSERS 8
-
-enum mlx5hws_definer_fname {
-       MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O,
-       MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I,
-       MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O,
-       MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I,
-       MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O,
-       MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I,
-       MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O,
-       MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I,
-       MLX5HWS_DEFINER_FNAME_ETH_TYPE_O,
-       MLX5HWS_DEFINER_FNAME_ETH_TYPE_I,
-       MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O,
-       MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I,
-       MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O,
-       MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I,
-       MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_O,
-       MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_I,
-       MLX5HWS_DEFINER_FNAME_VLAN_CFI_O,
-       MLX5HWS_DEFINER_FNAME_VLAN_CFI_I,
-       MLX5HWS_DEFINER_FNAME_VLAN_ID_O,
-       MLX5HWS_DEFINER_FNAME_VLAN_ID_I,
-       MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O,
-       MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I,
-       MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_O,
-       MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_I,
-       MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_O,
-       MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_I,
-       MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_O,
-       MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_I,
-       MLX5HWS_DEFINER_FNAME_IPV4_IHL_O,
-       MLX5HWS_DEFINER_FNAME_IPV4_IHL_I,
-       MLX5HWS_DEFINER_FNAME_IP_DSCP_O,
-       MLX5HWS_DEFINER_FNAME_IP_DSCP_I,
-       MLX5HWS_DEFINER_FNAME_IP_ECN_O,
-       MLX5HWS_DEFINER_FNAME_IP_ECN_I,
-       MLX5HWS_DEFINER_FNAME_IP_TTL_O,
-       MLX5HWS_DEFINER_FNAME_IP_TTL_I,
-       MLX5HWS_DEFINER_FNAME_IPV4_DST_O,
-       MLX5HWS_DEFINER_FNAME_IPV4_DST_I,
-       MLX5HWS_DEFINER_FNAME_IPV4_SRC_O,
-       MLX5HWS_DEFINER_FNAME_IPV4_SRC_I,
-       MLX5HWS_DEFINER_FNAME_IP_VERSION_O,
-       MLX5HWS_DEFINER_FNAME_IP_VERSION_I,
-       MLX5HWS_DEFINER_FNAME_IP_FRAG_O,
-       MLX5HWS_DEFINER_FNAME_IP_FRAG_I,
-       MLX5HWS_DEFINER_FNAME_IP_LEN_O,
-       MLX5HWS_DEFINER_FNAME_IP_LEN_I,
-       MLX5HWS_DEFINER_FNAME_IP_TOS_O,
-       MLX5HWS_DEFINER_FNAME_IP_TOS_I,
-       MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_O,
-       MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_I,
-       MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O,
-       MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O,
-       MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O,
-       MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O,
-       MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I,
-       MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I,
-       MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I,
-       MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I,
-       MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O,
-       MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O,
-       MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O,
-       MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O,
-       MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I,
-       MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I,
-       MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I,
-       MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I,
-       MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_O,
-       MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_I,
-       MLX5HWS_DEFINER_FNAME_L4_SPORT_O,
-       MLX5HWS_DEFINER_FNAME_L4_SPORT_I,
-       MLX5HWS_DEFINER_FNAME_L4_DPORT_O,
-       MLX5HWS_DEFINER_FNAME_L4_DPORT_I,
-       MLX5HWS_DEFINER_FNAME_TCP_FLAGS_I,
-       MLX5HWS_DEFINER_FNAME_TCP_FLAGS_O,
-       MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM,
-       MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM,
-       MLX5HWS_DEFINER_FNAME_GTP_TEID,
-       MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE,
-       MLX5HWS_DEFINER_FNAME_GTP_EXT_FLAG,
-       MLX5HWS_DEFINER_FNAME_GTP_NEXT_EXT_HDR,
-       MLX5HWS_DEFINER_FNAME_GTP_EXT_HDR_PDU,
-       MLX5HWS_DEFINER_FNAME_GTP_EXT_HDR_QFI,
-       MLX5HWS_DEFINER_FNAME_GTPU_DW0,
-       MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0,
-       MLX5HWS_DEFINER_FNAME_GTPU_DW2,
-       MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0,
-       MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1,
-       MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2,
-       MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3,
-       MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4,
-       MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5,
-       MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6,
-       MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7,
-       MLX5HWS_DEFINER_FNAME_VPORT_REG_C_0,
-       MLX5HWS_DEFINER_FNAME_VXLAN_FLAGS,
-       MLX5HWS_DEFINER_FNAME_VXLAN_VNI,
-       MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS,
-       MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD0,
-       MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO,
-       MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI,
-       MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD1,
-       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN,
-       MLX5HWS_DEFINER_FNAME_GENEVE_OAM,
-       MLX5HWS_DEFINER_FNAME_GENEVE_PROTO,
-       MLX5HWS_DEFINER_FNAME_GENEVE_VNI,
-       MLX5HWS_DEFINER_FNAME_SOURCE_QP,
-       MLX5HWS_DEFINER_FNAME_SOURCE_GVMI,
-       MLX5HWS_DEFINER_FNAME_REG_0,
-       MLX5HWS_DEFINER_FNAME_REG_1,
-       MLX5HWS_DEFINER_FNAME_REG_2,
-       MLX5HWS_DEFINER_FNAME_REG_3,
-       MLX5HWS_DEFINER_FNAME_REG_4,
-       MLX5HWS_DEFINER_FNAME_REG_5,
-       MLX5HWS_DEFINER_FNAME_REG_6,
-       MLX5HWS_DEFINER_FNAME_REG_7,
-       MLX5HWS_DEFINER_FNAME_REG_8,
-       MLX5HWS_DEFINER_FNAME_REG_9,
-       MLX5HWS_DEFINER_FNAME_REG_10,
-       MLX5HWS_DEFINER_FNAME_REG_11,
-       MLX5HWS_DEFINER_FNAME_REG_A,
-       MLX5HWS_DEFINER_FNAME_REG_B,
-       MLX5HWS_DEFINER_FNAME_GRE_KEY_PRESENT,
-       MLX5HWS_DEFINER_FNAME_GRE_C,
-       MLX5HWS_DEFINER_FNAME_GRE_K,
-       MLX5HWS_DEFINER_FNAME_GRE_S,
-       MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL,
-       MLX5HWS_DEFINER_FNAME_GRE_OPT_KEY,
-       MLX5HWS_DEFINER_FNAME_GRE_OPT_SEQ,
-       MLX5HWS_DEFINER_FNAME_GRE_OPT_CHECKSUM,
-       MLX5HWS_DEFINER_FNAME_INTEGRITY_O,
-       MLX5HWS_DEFINER_FNAME_INTEGRITY_I,
-       MLX5HWS_DEFINER_FNAME_ICMP_DW1,
-       MLX5HWS_DEFINER_FNAME_ICMP_DW2,
-       MLX5HWS_DEFINER_FNAME_ICMP_DW3,
-       MLX5HWS_DEFINER_FNAME_IPSEC_SPI,
-       MLX5HWS_DEFINER_FNAME_IPSEC_SEQUENCE_NUMBER,
-       MLX5HWS_DEFINER_FNAME_IPSEC_SYNDROME,
-       MLX5HWS_DEFINER_FNAME_MPLS0_O,
-       MLX5HWS_DEFINER_FNAME_MPLS1_O,
-       MLX5HWS_DEFINER_FNAME_MPLS2_O,
-       MLX5HWS_DEFINER_FNAME_MPLS3_O,
-       MLX5HWS_DEFINER_FNAME_MPLS4_O,
-       MLX5HWS_DEFINER_FNAME_MPLS0_I,
-       MLX5HWS_DEFINER_FNAME_MPLS1_I,
-       MLX5HWS_DEFINER_FNAME_MPLS2_I,
-       MLX5HWS_DEFINER_FNAME_MPLS3_I,
-       MLX5HWS_DEFINER_FNAME_MPLS4_I,
-       MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK,
-       MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK,
-       MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK,
-       MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK,
-       MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK,
-       MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK,
-       MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK,
-       MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK,
-       MLX5HWS_DEFINER_FNAME_OKS2_MPLS0_O,
-       MLX5HWS_DEFINER_FNAME_OKS2_MPLS1_O,
-       MLX5HWS_DEFINER_FNAME_OKS2_MPLS2_O,
-       MLX5HWS_DEFINER_FNAME_OKS2_MPLS3_O,
-       MLX5HWS_DEFINER_FNAME_OKS2_MPLS4_O,
-       MLX5HWS_DEFINER_FNAME_OKS2_MPLS0_I,
-       MLX5HWS_DEFINER_FNAME_OKS2_MPLS1_I,
-       MLX5HWS_DEFINER_FNAME_OKS2_MPLS2_I,
-       MLX5HWS_DEFINER_FNAME_OKS2_MPLS3_I,
-       MLX5HWS_DEFINER_FNAME_OKS2_MPLS4_I,
-       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_0,
-       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_1,
-       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_2,
-       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_3,
-       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_4,
-       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_5,
-       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_6,
-       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_7,
-       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_0,
-       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_1,
-       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_2,
-       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_3,
-       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_4,
-       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_5,
-       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_6,
-       MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_7,
-       MLX5HWS_DEFINER_FNAME_IB_L4_OPCODE,
-       MLX5HWS_DEFINER_FNAME_IB_L4_QPN,
-       MLX5HWS_DEFINER_FNAME_IB_L4_A,
-       MLX5HWS_DEFINER_FNAME_RANDOM_NUM,
-       MLX5HWS_DEFINER_FNAME_PTYPE_L2_O,
-       MLX5HWS_DEFINER_FNAME_PTYPE_L2_I,
-       MLX5HWS_DEFINER_FNAME_PTYPE_L3_O,
-       MLX5HWS_DEFINER_FNAME_PTYPE_L3_I,
-       MLX5HWS_DEFINER_FNAME_PTYPE_L4_O,
-       MLX5HWS_DEFINER_FNAME_PTYPE_L4_I,
-       MLX5HWS_DEFINER_FNAME_PTYPE_L4_EXT_O,
-       MLX5HWS_DEFINER_FNAME_PTYPE_L4_EXT_I,
-       MLX5HWS_DEFINER_FNAME_PTYPE_FRAG_O,
-       MLX5HWS_DEFINER_FNAME_PTYPE_FRAG_I,
-       MLX5HWS_DEFINER_FNAME_TNL_HDR_0,
-       MLX5HWS_DEFINER_FNAME_TNL_HDR_1,
-       MLX5HWS_DEFINER_FNAME_TNL_HDR_2,
-       MLX5HWS_DEFINER_FNAME_TNL_HDR_3,
-       MLX5HWS_DEFINER_FNAME_MAX,
-};
-
-enum mlx5hws_definer_match_criteria {
-       MLX5HWS_DEFINER_MATCH_CRITERIA_EMPTY = 0,
-       MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER = 1 << 0,
-       MLX5HWS_DEFINER_MATCH_CRITERIA_MISC = 1 << 1,
-       MLX5HWS_DEFINER_MATCH_CRITERIA_INNER = 1 << 2,
-       MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2 = 1 << 3,
-       MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3 = 1 << 4,
-       MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4 = 1 << 5,
-       MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5 = 1 << 6,
-       MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6 = 1 << 7,
-};
-
-enum mlx5hws_definer_type {
-       MLX5HWS_DEFINER_TYPE_MATCH,
-       MLX5HWS_DEFINER_TYPE_JUMBO,
-};
-
-enum mlx5hws_definer_match_flag {
-       MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE = 1 << 0,
-       MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE = 1 << 1,
-       MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU = 1 << 2,
-       MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE = 1 << 3,
-       MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN = 1 << 4,
-       MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1 = 1 << 5,
-
-       MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY = 1 << 6,
-       MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2 = 1 << 7,
-
-       MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE = 1 << 8,
-       MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP = 1 << 9,
-
-       MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 = 1 << 10,
-       MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 = 1 << 11,
-       MLX5HWS_DEFINER_MATCH_FLAG_TCP_O = 1 << 12,
-       MLX5HWS_DEFINER_MATCH_FLAG_TCP_I = 1 << 13,
-};
-
-struct mlx5hws_definer_fc {
-       struct mlx5hws_context *ctx;
-       /* Source */
-       u32 s_byte_off;
-       int s_bit_off;
-       u32 s_bit_mask;
-       /* Destination */
-       u32 byte_off;
-       int bit_off;
-       u32 bit_mask;
-       enum mlx5hws_definer_fname fname;
-       void (*tag_set)(struct mlx5hws_definer_fc *fc,
-                       void *mach_param,
-                       u8 *tag);
-       void (*tag_mask_set)(struct mlx5hws_definer_fc *fc,
-                            void *mach_param,
-                            u8 *tag);
-};
-
-struct mlx5_ifc_definer_hl_eth_l2_bits {
-       u8 dmac_47_16[0x20];
-       u8 dmac_15_0[0x10];
-       u8 l3_ethertype[0x10];
-       u8 reserved_at_40[0x1];
-       u8 sx_sniffer[0x1];
-       u8 functional_lb[0x1];
-       u8 ip_fragmented[0x1];
-       u8 qp_type[0x2];
-       u8 encap_type[0x2];
-       u8 port_number[0x2];
-       u8 l3_type[0x2];
-       u8 l4_type_bwc[0x2];
-       u8 first_vlan_qualifier[0x2];
-       u8 first_priority[0x3];
-       u8 first_cfi[0x1];
-       u8 first_vlan_id[0xc];
-       u8 l4_type[0x4];
-       u8 reserved_at_64[0x2];
-       u8 ipsec_layer[0x2];
-       u8 l2_type[0x2];
-       u8 force_lb[0x1];
-       u8 l2_ok[0x1];
-       u8 l3_ok[0x1];
-       u8 l4_ok[0x1];
-       u8 second_vlan_qualifier[0x2];
-       u8 second_priority[0x3];
-       u8 second_cfi[0x1];
-       u8 second_vlan_id[0xc];
-};
-
-struct mlx5_ifc_definer_hl_eth_l2_src_bits {
-       u8 smac_47_16[0x20];
-       u8 smac_15_0[0x10];
-       u8 loopback_syndrome[0x8];
-       u8 l3_type[0x2];
-       u8 l4_type_bwc[0x2];
-       u8 first_vlan_qualifier[0x2];
-       u8 ip_fragmented[0x1];
-       u8 functional_lb[0x1];
-};
-
-struct mlx5_ifc_definer_hl_ib_l2_bits {
-       u8 sx_sniffer[0x1];
-       u8 force_lb[0x1];
-       u8 functional_lb[0x1];
-       u8 reserved_at_3[0x3];
-       u8 port_number[0x2];
-       u8 sl[0x4];
-       u8 qp_type[0x2];
-       u8 lnh[0x2];
-       u8 dlid[0x10];
-       u8 vl[0x4];
-       u8 lrh_packet_length[0xc];
-       u8 slid[0x10];
-};
-
-struct mlx5_ifc_definer_hl_eth_l3_bits {
-       u8 ip_version[0x4];
-       u8 ihl[0x4];
-       union {
-               u8 tos[0x8];
-               struct {
-                       u8 dscp[0x6];
-                       u8 ecn[0x2];
-               };
-       };
-       u8 time_to_live_hop_limit[0x8];
-       u8 protocol_next_header[0x8];
-       u8 identification[0x10];
-       union {
-               u8 ipv4_frag[0x10];
-               struct {
-                       u8 flags[0x3];
-                       u8 fragment_offset[0xd];
-               };
-       };
-       u8 ipv4_total_length[0x10];
-       u8 checksum[0x10];
-       u8 reserved_at_60[0xc];
-       u8 flow_label[0x14];
-       u8 packet_length[0x10];
-       u8 ipv6_payload_length[0x10];
-};
-
-struct mlx5_ifc_definer_hl_eth_l4_bits {
-       u8 source_port[0x10];
-       u8 destination_port[0x10];
-       u8 data_offset[0x4];
-       u8 l4_ok[0x1];
-       u8 l3_ok[0x1];
-       u8 ip_fragmented[0x1];
-       u8 tcp_ns[0x1];
-       union {
-               u8 tcp_flags[0x8];
-               struct {
-                       u8 tcp_cwr[0x1];
-                       u8 tcp_ece[0x1];
-                       u8 tcp_urg[0x1];
-                       u8 tcp_ack[0x1];
-                       u8 tcp_psh[0x1];
-                       u8 tcp_rst[0x1];
-                       u8 tcp_syn[0x1];
-                       u8 tcp_fin[0x1];
-               };
-       };
-       u8 first_fragment[0x1];
-       u8 reserved_at_31[0xf];
-};
-
-struct mlx5_ifc_definer_hl_src_qp_gvmi_bits {
-       u8 loopback_syndrome[0x8];
-       u8 l3_type[0x2];
-       u8 l4_type_bwc[0x2];
-       u8 first_vlan_qualifier[0x2];
-       u8 reserved_at_e[0x1];
-       u8 functional_lb[0x1];
-       u8 source_gvmi[0x10];
-       u8 force_lb[0x1];
-       u8 ip_fragmented[0x1];
-       u8 source_is_requestor[0x1];
-       u8 reserved_at_23[0x5];
-       u8 source_qp[0x18];
-};
-
-struct mlx5_ifc_definer_hl_ib_l4_bits {
-       u8 opcode[0x8];
-       u8 qp[0x18];
-       u8 se[0x1];
-       u8 migreq[0x1];
-       u8 ackreq[0x1];
-       u8 fecn[0x1];
-       u8 becn[0x1];
-       u8 bth[0x1];
-       u8 deth[0x1];
-       u8 dcceth[0x1];
-       u8 reserved_at_28[0x2];
-       u8 pad_count[0x2];
-       u8 tver[0x4];
-       u8 p_key[0x10];
-       u8 reserved_at_40[0x8];
-       u8 deth_source_qp[0x18];
-};
-
-enum mlx5hws_integrity_ok1_bits {
-       MLX5HWS_DEFINER_OKS1_FIRST_L4_OK = 24,
-       MLX5HWS_DEFINER_OKS1_FIRST_L3_OK = 25,
-       MLX5HWS_DEFINER_OKS1_SECOND_L4_OK = 26,
-       MLX5HWS_DEFINER_OKS1_SECOND_L3_OK = 27,
-       MLX5HWS_DEFINER_OKS1_FIRST_L4_CSUM_OK = 28,
-       MLX5HWS_DEFINER_OKS1_FIRST_IPV4_CSUM_OK = 29,
-       MLX5HWS_DEFINER_OKS1_SECOND_L4_CSUM_OK = 30,
-       MLX5HWS_DEFINER_OKS1_SECOND_IPV4_CSUM_OK = 31,
-};
-
-struct mlx5_ifc_definer_hl_oks1_bits {
-       union {
-               u8 oks1_bits[0x20];
-               struct {
-                       u8 second_ipv4_checksum_ok[0x1];
-                       u8 second_l4_checksum_ok[0x1];
-                       u8 first_ipv4_checksum_ok[0x1];
-                       u8 first_l4_checksum_ok[0x1];
-                       u8 second_l3_ok[0x1];
-                       u8 second_l4_ok[0x1];
-                       u8 first_l3_ok[0x1];
-                       u8 first_l4_ok[0x1];
-                       u8 flex_parser7_steering_ok[0x1];
-                       u8 flex_parser6_steering_ok[0x1];
-                       u8 flex_parser5_steering_ok[0x1];
-                       u8 flex_parser4_steering_ok[0x1];
-                       u8 flex_parser3_steering_ok[0x1];
-                       u8 flex_parser2_steering_ok[0x1];
-                       u8 flex_parser1_steering_ok[0x1];
-                       u8 flex_parser0_steering_ok[0x1];
-                       u8 second_ipv6_extension_header_vld[0x1];
-                       u8 first_ipv6_extension_header_vld[0x1];
-                       u8 l3_tunneling_ok[0x1];
-                       u8 l2_tunneling_ok[0x1];
-                       u8 second_tcp_ok[0x1];
-                       u8 second_udp_ok[0x1];
-                       u8 second_ipv4_ok[0x1];
-                       u8 second_ipv6_ok[0x1];
-                       u8 second_l2_ok[0x1];
-                       u8 vxlan_ok[0x1];
-                       u8 gre_ok[0x1];
-                       u8 first_tcp_ok[0x1];
-                       u8 first_udp_ok[0x1];
-                       u8 first_ipv4_ok[0x1];
-                       u8 first_ipv6_ok[0x1];
-                       u8 first_l2_ok[0x1];
-               };
-       };
-};
-
-struct mlx5_ifc_definer_hl_oks2_bits {
-       u8 reserved_at_0[0xa];
-       u8 second_mpls_ok[0x1];
-       u8 second_mpls4_s_bit[0x1];
-       u8 second_mpls4_qualifier[0x1];
-       u8 second_mpls3_s_bit[0x1];
-       u8 second_mpls3_qualifier[0x1];
-       u8 second_mpls2_s_bit[0x1];
-       u8 second_mpls2_qualifier[0x1];
-       u8 second_mpls1_s_bit[0x1];
-       u8 second_mpls1_qualifier[0x1];
-       u8 second_mpls0_s_bit[0x1];
-       u8 second_mpls0_qualifier[0x1];
-       u8 first_mpls_ok[0x1];
-       u8 first_mpls4_s_bit[0x1];
-       u8 first_mpls4_qualifier[0x1];
-       u8 first_mpls3_s_bit[0x1];
-       u8 first_mpls3_qualifier[0x1];
-       u8 first_mpls2_s_bit[0x1];
-       u8 first_mpls2_qualifier[0x1];
-       u8 first_mpls1_s_bit[0x1];
-       u8 first_mpls1_qualifier[0x1];
-       u8 first_mpls0_s_bit[0x1];
-       u8 first_mpls0_qualifier[0x1];
-};
-
-struct mlx5_ifc_definer_hl_voq_bits {
-       u8 reserved_at_0[0x18];
-       u8 ecn_ok[0x1];
-       u8 congestion[0x1];
-       u8 profile[0x2];
-       u8 internal_prio[0x4];
-};
-
-struct mlx5_ifc_definer_hl_ipv4_src_dst_bits {
-       u8 source_address[0x20];
-       u8 destination_address[0x20];
-};
-
-struct mlx5_ifc_definer_hl_random_number_bits {
-       u8 random_number[0x10];
-       u8 reserved[0x10];
-};
-
-struct mlx5_ifc_definer_hl_ipv6_addr_bits {
-       u8 ipv6_address_127_96[0x20];
-       u8 ipv6_address_95_64[0x20];
-       u8 ipv6_address_63_32[0x20];
-       u8 ipv6_address_31_0[0x20];
-};
-
-struct mlx5_ifc_definer_tcp_icmp_header_bits {
-       union {
-               struct {
-                       u8 icmp_dw1[0x20];
-                       u8 icmp_dw2[0x20];
-                       u8 icmp_dw3[0x20];
-               };
-               struct {
-                       u8 tcp_seq[0x20];
-                       u8 tcp_ack[0x20];
-                       u8 tcp_win_urg[0x20];
-               };
-       };
-};
-
-struct mlx5_ifc_definer_hl_tunnel_header_bits {
-       u8 tunnel_header_0[0x20];
-       u8 tunnel_header_1[0x20];
-       u8 tunnel_header_2[0x20];
-       u8 tunnel_header_3[0x20];
-};
-
-struct mlx5_ifc_definer_hl_ipsec_bits {
-       u8 spi[0x20];
-       u8 sequence_number[0x20];
-       u8 reserved[0x10];
-       u8 ipsec_syndrome[0x8];
-       u8 next_header[0x8];
-};
-
-struct mlx5_ifc_definer_hl_metadata_bits {
-       u8 metadata_to_cqe[0x20];
-       u8 general_purpose[0x20];
-       u8 acomulated_hash[0x20];
-};
-
-struct mlx5_ifc_definer_hl_flex_parser_bits {
-       u8 flex_parser_7[0x20];
-       u8 flex_parser_6[0x20];
-       u8 flex_parser_5[0x20];
-       u8 flex_parser_4[0x20];
-       u8 flex_parser_3[0x20];
-       u8 flex_parser_2[0x20];
-       u8 flex_parser_1[0x20];
-       u8 flex_parser_0[0x20];
-};
-
-struct mlx5_ifc_definer_hl_registers_bits {
-       u8 register_c_10[0x20];
-       u8 register_c_11[0x20];
-       u8 register_c_8[0x20];
-       u8 register_c_9[0x20];
-       u8 register_c_6[0x20];
-       u8 register_c_7[0x20];
-       u8 register_c_4[0x20];
-       u8 register_c_5[0x20];
-       u8 register_c_2[0x20];
-       u8 register_c_3[0x20];
-       u8 register_c_0[0x20];
-       u8 register_c_1[0x20];
-};
-
-struct mlx5_ifc_definer_hl_mpls_bits {
-       u8 mpls0_label[0x20];
-       u8 mpls1_label[0x20];
-       u8 mpls2_label[0x20];
-       u8 mpls3_label[0x20];
-       u8 mpls4_label[0x20];
-};
-
-struct mlx5_ifc_definer_hl_bits {
-       struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_outer;
-       struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_inner;
-       struct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_outer;
-       struct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_inner;
-       struct mlx5_ifc_definer_hl_ib_l2_bits ib_l2;
-       struct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_outer;
-       struct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_inner;
-       struct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_outer;
-       struct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_inner;
-       struct mlx5_ifc_definer_hl_src_qp_gvmi_bits source_qp_gvmi;
-       struct mlx5_ifc_definer_hl_ib_l4_bits ib_l4;
-       struct mlx5_ifc_definer_hl_oks1_bits oks1;
-       struct mlx5_ifc_definer_hl_oks2_bits oks2;
-       struct mlx5_ifc_definer_hl_voq_bits voq;
-       u8 reserved_at_480[0x380];
-       struct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_outer;
-       struct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_inner;
-       struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_outer;
-       struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_inner;
-       struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_outer;
-       struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_inner;
-       u8 unsupported_dest_ib_l3[0x80];
-       u8 unsupported_source_ib_l3[0x80];
-       u8 unsupported_udp_misc_outer[0x20];
-       u8 unsupported_udp_misc_inner[0x20];
-       struct mlx5_ifc_definer_tcp_icmp_header_bits tcp_icmp;
-       struct mlx5_ifc_definer_hl_tunnel_header_bits tunnel_header;
-       struct mlx5_ifc_definer_hl_mpls_bits mpls_outer;
-       struct mlx5_ifc_definer_hl_mpls_bits mpls_inner;
-       u8 unsupported_config_headers_outer[0x80];
-       u8 unsupported_config_headers_inner[0x80];
-       struct mlx5_ifc_definer_hl_random_number_bits random_number;
-       struct mlx5_ifc_definer_hl_ipsec_bits ipsec;
-       struct mlx5_ifc_definer_hl_metadata_bits metadata;
-       u8 unsupported_utc_timestamp[0x40];
-       u8 unsupported_free_running_timestamp[0x40];
-       struct mlx5_ifc_definer_hl_flex_parser_bits flex_parser;
-       struct mlx5_ifc_definer_hl_registers_bits registers;
-       /* Reserved in case header layout on future HW */
-       u8 unsupported_reserved[0xd40];
-};
-
-enum mlx5hws_definer_gtp {
-       MLX5HWS_DEFINER_GTP_EXT_HDR_BIT = 0x04,
-};
-
-struct mlx5_ifc_header_gtp_bits {
-       u8 version[0x3];
-       u8 proto_type[0x1];
-       u8 reserved1[0x1];
-       union {
-               u8 msg_flags[0x3];
-               struct {
-                       u8 ext_hdr_flag[0x1];
-                       u8 seq_num_flag[0x1];
-                       u8 pdu_flag[0x1];
-               };
-       };
-       u8 msg_type[0x8];
-       u8 msg_len[0x8];
-       u8 teid[0x20];
-};
-
-struct mlx5_ifc_header_opt_gtp_bits {
-       u8 seq_num[0x10];
-       u8 pdu_num[0x8];
-       u8 next_ext_hdr_type[0x8];
-};
-
-struct mlx5_ifc_header_gtp_psc_bits {
-       u8 len[0x8];
-       u8 pdu_type[0x4];
-       u8 flags[0x4];
-       u8 qfi[0x8];
-       u8 reserved2[0x8];
-};
-
-struct mlx5_ifc_header_ipv6_vtc_bits {
-       u8 version[0x4];
-       union {
-               u8 tos[0x8];
-               struct {
-                       u8 dscp[0x6];
-                       u8 ecn[0x2];
-               };
-       };
-       u8 flow_label[0x14];
-};
-
-struct mlx5_ifc_header_ipv6_routing_ext_bits {
-       u8 next_hdr[0x8];
-       u8 hdr_len[0x8];
-       u8 type[0x8];
-       u8 segments_left[0x8];
-       union {
-               u8 flags[0x20];
-               struct {
-                       u8 last_entry[0x8];
-                       u8 flag[0x8];
-                       u8 tag[0x10];
-               };
-       };
-};
-
-struct mlx5_ifc_header_vxlan_bits {
-       u8 flags[0x8];
-       u8 reserved1[0x18];
-       u8 vni[0x18];
-       u8 reserved2[0x8];
-};
-
-struct mlx5_ifc_header_vxlan_gpe_bits {
-       u8 flags[0x8];
-       u8 rsvd0[0x10];
-       u8 protocol[0x8];
-       u8 vni[0x18];
-       u8 rsvd1[0x8];
-};
-
-struct mlx5_ifc_header_gre_bits {
-       union {
-               u8 c_rsvd0_ver[0x10];
-               struct {
-                       u8 gre_c_present[0x1];
-                       u8 reserved_at_1[0x1];
-                       u8 gre_k_present[0x1];
-                       u8 gre_s_present[0x1];
-                       u8 reserved_at_4[0x9];
-                       u8 version[0x3];
-               };
-       };
-       u8 gre_protocol[0x10];
-       u8 checksum[0x10];
-       u8 reserved_at_30[0x10];
-};
-
-struct mlx5_ifc_header_geneve_bits {
-       union {
-               u8 ver_opt_len_o_c_rsvd[0x10];
-               struct {
-                       u8 version[0x2];
-                       u8 opt_len[0x6];
-                       u8 o_flag[0x1];
-                       u8 c_flag[0x1];
-                       u8 reserved_at_a[0x6];
-               };
-       };
-       u8 protocol_type[0x10];
-       u8 vni[0x18];
-       u8 reserved_at_38[0x8];
-};
-
-struct mlx5_ifc_header_geneve_opt_bits {
-       u8 class[0x10];
-       u8 type[0x8];
-       u8 reserved[0x3];
-       u8 len[0x5];
-};
-
-struct mlx5_ifc_header_icmp_bits {
-       union {
-               u8 icmp_dw1[0x20];
-               struct {
-                       u8 type[0x8];
-                       u8 code[0x8];
-                       u8 cksum[0x10];
-               };
-       };
-       union {
-               u8 icmp_dw2[0x20];
-               struct {
-                       u8 ident[0x10];
-                       u8 seq_nb[0x10];
-               };
-       };
-};
-
-struct mlx5hws_definer {
-       enum mlx5hws_definer_type type;
-       u8 dw_selector[DW_SELECTORS];
-       u8 byte_selector[BYTE_SELECTORS];
-       struct mlx5hws_rule_match_tag mask;
-       u32 obj_id;
-};
-
-struct mlx5hws_definer_cache {
-       struct list_head list_head;
-};
-
-struct mlx5hws_definer_cache_item {
-       struct mlx5hws_definer definer;
-       u32 refcount;
-       struct list_head list_node;
-};
-
-static inline bool
-mlx5hws_definer_is_jumbo(struct mlx5hws_definer *definer)
-{
-       return (definer->type == MLX5HWS_DEFINER_TYPE_JUMBO);
-}
-
-void mlx5hws_definer_create_tag(u32 *match_param,
-                               struct mlx5hws_definer_fc *fc,
-                               u32 fc_sz,
-                               u8 *tag);
-
-int mlx5hws_definer_get_id(struct mlx5hws_definer *definer);
-
-int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
-                           struct mlx5hws_match_template *mt);
-
-void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx,
-                              struct mlx5hws_match_template *mt);
-
-int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache);
-
-void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache);
-
-int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
-                           struct mlx5hws_definer *definer_b);
-
-int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
-                           struct mlx5hws_definer *definer);
-
-void mlx5hws_definer_free(struct mlx5hws_context *ctx,
-                         struct mlx5hws_definer *definer);
-
-int mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
-                               struct mlx5hws_match_template *mt,
-                               struct mlx5hws_definer *match_definer);
-
-struct mlx5hws_definer_fc *
-mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
-                                                  u8 match_criteria_enable,
-                                                  u32 *match_param,
-                                                  int *fc_sz);
-
-#endif /* MLX5HWS_DEFINER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h
deleted file mode 100644 (file)
index 5643be1..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#ifndef MLX5HWS_INTERNAL_H_
-#define MLX5HWS_INTERNAL_H_
-
-#include <linux/mlx5/transobj.h>
-#include <linux/mlx5/vport.h>
-#include "fs_core.h"
-#include "wq.h"
-#include "lib/mlx5.h"
-
-#include "mlx5hws_prm.h"
-#include "mlx5hws.h"
-#include "mlx5hws_pool.h"
-#include "mlx5hws_vport.h"
-#include "mlx5hws_context.h"
-#include "mlx5hws_table.h"
-#include "mlx5hws_send.h"
-#include "mlx5hws_rule.h"
-#include "mlx5hws_cmd.h"
-#include "mlx5hws_action.h"
-#include "mlx5hws_definer.h"
-#include "mlx5hws_matcher.h"
-#include "mlx5hws_debug.h"
-#include "mlx5hws_pat_arg.h"
-#include "mlx5hws_bwc.h"
-#include "mlx5hws_bwc_complex.h"
-
-#define W_SIZE         2
-#define DW_SIZE                4
-#define BITS_IN_BYTE   8
-#define BITS_IN_DW     (BITS_IN_BYTE * DW_SIZE)
-
-#define IS_BIT_SET(_value, _bit) ((_value) & (1ULL << (_bit)))
-
-#define mlx5hws_err(ctx, arg...) mlx5_core_err((ctx)->mdev, ##arg)
-#define mlx5hws_info(ctx, arg...) mlx5_core_info((ctx)->mdev, ##arg)
-#define mlx5hws_dbg(ctx, arg...) mlx5_core_dbg((ctx)->mdev, ##arg)
-
-#define MLX5HWS_TABLE_TYPE_BASE 2
-#define MLX5HWS_ACTION_STE_IDX_ANY 0
-
-static inline bool is_mem_zero(const u8 *mem, size_t size)
-{
-       if (unlikely(!size)) {
-               pr_warn("HWS: invalid buffer of size 0 in %s\n", __func__);
-               return true;
-       }
-
-       return (*mem == 0) && memcmp(mem, mem + 1, size - 1) == 0;
-}
-
-static inline unsigned long align(unsigned long val, unsigned long align)
-{
-       return (val + align - 1) & ~(align - 1);
-}
-
-#endif /* MLX5HWS_INTERNAL_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c
deleted file mode 100644 (file)
index 61a1155..0000000
+++ /dev/null
@@ -1,1216 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#include "mlx5hws_internal.h"
-
-enum mlx5hws_matcher_rtc_type {
-       HWS_MATCHER_RTC_TYPE_MATCH,
-       HWS_MATCHER_RTC_TYPE_STE_ARRAY,
-       HWS_MATCHER_RTC_TYPE_MAX,
-};
-
-static const char * const mlx5hws_matcher_rtc_type_str[] = {
-       [HWS_MATCHER_RTC_TYPE_MATCH] = "MATCH",
-       [HWS_MATCHER_RTC_TYPE_STE_ARRAY] = "STE_ARRAY",
-       [HWS_MATCHER_RTC_TYPE_MAX] = "UNKNOWN",
-};
-
-static const char *hws_matcher_rtc_type_to_str(enum mlx5hws_matcher_rtc_type rtc_type)
-{
-       if (rtc_type > HWS_MATCHER_RTC_TYPE_MAX)
-               rtc_type = HWS_MATCHER_RTC_TYPE_MAX;
-       return mlx5hws_matcher_rtc_type_str[rtc_type];
-}
-
-static bool hws_matcher_requires_col_tbl(u8 log_num_of_rules)
-{
-       /* Collision table concatenation is done only for large rule tables */
-       return log_num_of_rules > MLX5HWS_MATCHER_ASSURED_RULES_TH;
-}
-
-static u8 hws_matcher_rules_to_tbl_depth(u8 log_num_of_rules)
-{
-       if (hws_matcher_requires_col_tbl(log_num_of_rules))
-               return MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH;
-
-       /* For small rule tables we use a single deep table to assure insertion */
-       return min(log_num_of_rules, MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH);
-}
-
-static void hws_matcher_destroy_end_ft(struct mlx5hws_matcher *matcher)
-{
-       mlx5hws_table_destroy_default_ft(matcher->tbl, matcher->end_ft_id);
-}
-
-static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher)
-{
-       struct mlx5hws_table *tbl = matcher->tbl;
-       int ret;
-
-       ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &matcher->end_ft_id);
-       if (ret) {
-               mlx5hws_err(tbl->ctx, "Failed to create matcher end flow table\n");
-               return ret;
-       }
-       return 0;
-}
-
-static int hws_matcher_connect(struct mlx5hws_matcher *matcher)
-{
-       struct mlx5hws_table *tbl = matcher->tbl;
-       struct mlx5hws_context *ctx = tbl->ctx;
-       struct mlx5hws_matcher *prev = NULL;
-       struct mlx5hws_matcher *next = NULL;
-       struct mlx5hws_matcher *tmp_matcher;
-       int ret;
-
-       /* Find location in matcher list */
-       if (list_empty(&tbl->matchers_list)) {
-               list_add(&matcher->list_node, &tbl->matchers_list);
-               goto connect;
-       }
-
-       list_for_each_entry(tmp_matcher, &tbl->matchers_list, list_node) {
-               if (tmp_matcher->attr.priority > matcher->attr.priority) {
-                       next = tmp_matcher;
-                       break;
-               }
-               prev = tmp_matcher;
-       }
-
-       if (next)
-               /* insert before next */
-               list_add_tail(&matcher->list_node, &next->list_node);
-       else
-               /* insert after prev */
-               list_add(&matcher->list_node, &prev->list_node);
-
-connect:
-       if (next) {
-               /* Connect to next RTC */
-               ret = mlx5hws_table_ft_set_next_rtc(ctx,
-                                                   matcher->end_ft_id,
-                                                   tbl->fw_ft_type,
-                                                   next->match_ste.rtc_0_id,
-                                                   next->match_ste.rtc_1_id);
-               if (ret) {
-                       mlx5hws_err(ctx, "Failed to connect new matcher to next RTC\n");
-                       goto remove_from_list;
-               }
-       } else {
-               /* Connect last matcher to next miss_tbl if exists */
-               ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
-               if (ret) {
-                       mlx5hws_err(ctx, "Failed connect new matcher to miss_tbl\n");
-                       goto remove_from_list;
-               }
-       }
-
-       /* Connect to previous FT */
-       ret = mlx5hws_table_ft_set_next_rtc(ctx,
-                                           prev ? prev->end_ft_id : tbl->ft_id,
-                                           tbl->fw_ft_type,
-                                           matcher->match_ste.rtc_0_id,
-                                           matcher->match_ste.rtc_1_id);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to connect new matcher to previous FT\n");
-               goto remove_from_list;
-       }
-
-       /* Reset prev matcher FT default miss (drop refcount) */
-       ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev ? prev->end_ft_id : tbl->ft_id);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to reset matcher ft default miss\n");
-               goto remove_from_list;
-       }
-
-       if (!prev) {
-               /* Update tables missing to current matcher in the table */
-               ret = mlx5hws_table_update_connected_miss_tables(tbl);
-               if (ret) {
-                       mlx5hws_err(ctx, "Fatal error, failed to update connected miss table\n");
-                       goto remove_from_list;
-               }
-       }
-
-       return 0;
-
-remove_from_list:
-       list_del_init(&matcher->list_node);
-       return ret;
-}
-
-static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
-{
-       struct mlx5hws_matcher *next = NULL, *prev = NULL;
-       struct mlx5hws_table *tbl = matcher->tbl;
-       u32 prev_ft_id = tbl->ft_id;
-       int ret;
-
-       if (!list_is_first(&matcher->list_node, &tbl->matchers_list)) {
-               prev = list_prev_entry(matcher, list_node);
-               prev_ft_id = prev->end_ft_id;
-       }
-
-       if (!list_is_last(&matcher->list_node, &tbl->matchers_list))
-               next = list_next_entry(matcher, list_node);
-
-       list_del_init(&matcher->list_node);
-
-       if (next) {
-               /* Connect previous end FT to next RTC */
-               ret = mlx5hws_table_ft_set_next_rtc(tbl->ctx,
-                                                   prev_ft_id,
-                                                   tbl->fw_ft_type,
-                                                   next->match_ste.rtc_0_id,
-                                                   next->match_ste.rtc_1_id);
-               if (ret) {
-                       mlx5hws_err(tbl->ctx, "Failed to disconnect matcher\n");
-                       goto matcher_reconnect;
-               }
-       } else {
-               ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
-               if (ret) {
-                       mlx5hws_err(tbl->ctx, "Failed to disconnect last matcher\n");
-                       goto matcher_reconnect;
-               }
-       }
-
-       /* Removing first matcher, update connected miss tables if exists */
-       if (prev_ft_id == tbl->ft_id) {
-               ret = mlx5hws_table_update_connected_miss_tables(tbl);
-               if (ret) {
-                       mlx5hws_err(tbl->ctx, "Fatal error, failed to update connected miss table\n");
-                       goto matcher_reconnect;
-               }
-       }
-
-       ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev_ft_id);
-       if (ret) {
-               mlx5hws_err(tbl->ctx, "Fatal error, failed to restore matcher ft default miss\n");
-               goto matcher_reconnect;
-       }
-
-       return 0;
-
-matcher_reconnect:
-       if (list_empty(&tbl->matchers_list) || !prev)
-               list_add(&matcher->list_node, &tbl->matchers_list);
-       else
-               /* insert after prev matcher */
-               list_add(&matcher->list_node, &prev->list_node);
-
-       return ret;
-}
-
-static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
-                                       struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
-                                       enum mlx5hws_matcher_rtc_type rtc_type,
-                                       bool is_mirror)
-{
-       struct mlx5hws_pool_chunk *ste = &matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].ste;
-       enum mlx5hws_matcher_flow_src flow_src = matcher->attr.optimize_flow_src;
-       bool is_match_rtc = rtc_type == HWS_MATCHER_RTC_TYPE_MATCH;
-
-       if ((flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT && !is_mirror) ||
-           (flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE && is_mirror)) {
-               /* Optimize FDB RTC */
-               rtc_attr->log_size = 0;
-               rtc_attr->log_depth = 0;
-       } else {
-               /* Keep original values */
-               rtc_attr->log_size = is_match_rtc ? matcher->attr.table.sz_row_log : ste->order;
-               rtc_attr->log_depth = is_match_rtc ? matcher->attr.table.sz_col_log : 0;
-       }
-}
-
-static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
-                                 enum mlx5hws_matcher_rtc_type rtc_type,
-                                 u8 action_ste_selector)
-{
-       struct mlx5hws_matcher_attr *attr = &matcher->attr;
-       struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
-       struct mlx5hws_match_template *mt = matcher->mt;
-       struct mlx5hws_context *ctx = matcher->tbl->ctx;
-       struct mlx5hws_action_default_stc *default_stc;
-       struct mlx5hws_matcher_action_ste *action_ste;
-       struct mlx5hws_table *tbl = matcher->tbl;
-       struct mlx5hws_pool *ste_pool, *stc_pool;
-       struct mlx5hws_pool_chunk *ste;
-       u32 *rtc_0_id, *rtc_1_id;
-       u32 obj_id;
-       int ret;
-
-       switch (rtc_type) {
-       case HWS_MATCHER_RTC_TYPE_MATCH:
-               rtc_0_id = &matcher->match_ste.rtc_0_id;
-               rtc_1_id = &matcher->match_ste.rtc_1_id;
-               ste_pool = matcher->match_ste.pool;
-               ste = &matcher->match_ste.ste;
-               ste->order = attr->table.sz_col_log + attr->table.sz_row_log;
-
-               rtc_attr.log_size = attr->table.sz_row_log;
-               rtc_attr.log_depth = attr->table.sz_col_log;
-               rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
-               rtc_attr.is_scnd_range = 0;
-               rtc_attr.miss_ft_id = matcher->end_ft_id;
-
-               if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) {
-                       /* The usual Hash Table */
-                       rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
-
-                       /* The first mt is used since all share the same definer */
-                       rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
-               } else if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) {
-                       rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
-                       rtc_attr.num_hash_definer = 1;
-
-                       if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
-                               /* Hash Split Table */
-                               rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
-                               rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
-                       } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
-                               /* Linear Lookup Table */
-                               rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR;
-                               rtc_attr.match_definer_0 = ctx->caps->linear_match_definer;
-                       }
-               }
-
-               /* Match pool requires implicit allocation */
-               ret = mlx5hws_pool_chunk_alloc(ste_pool, ste);
-               if (ret) {
-                       mlx5hws_err(ctx, "Failed to allocate STE for %s RTC",
-                                   hws_matcher_rtc_type_to_str(rtc_type));
-                       return ret;
-               }
-               break;
-
-       case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
-               action_ste = &matcher->action_ste[action_ste_selector];
-
-               rtc_0_id = &action_ste->rtc_0_id;
-               rtc_1_id = &action_ste->rtc_1_id;
-               ste_pool = action_ste->pool;
-               ste = &action_ste->ste;
-               ste->order = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
-                            attr->table.sz_row_log;
-               rtc_attr.log_size = ste->order;
-               rtc_attr.log_depth = 0;
-               rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
-               /* The action STEs use the default always hit definer */
-               rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
-               rtc_attr.is_frst_jumbo = false;
-               rtc_attr.miss_ft_id = 0;
-               break;
-
-       default:
-               mlx5hws_err(ctx, "HWS Invalid RTC type\n");
-               return -EINVAL;
-       }
-
-       obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
-
-       rtc_attr.pd = ctx->pd_num;
-       rtc_attr.ste_base = obj_id;
-       rtc_attr.ste_offset = ste->offset;
-       rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
-       rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, false);
-       hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, false);
-
-       /* STC is a single resource (obj_id), use any STC for the ID */
-       stc_pool = ctx->stc_pool[tbl->type];
-       default_stc = ctx->common_res[tbl->type].default_stc;
-       obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
-       rtc_attr.stc_base = obj_id;
-
-       ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to create matcher RTC of type %s",
-                           hws_matcher_rtc_type_to_str(rtc_type));
-               goto free_ste;
-       }
-
-       if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
-               obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
-               rtc_attr.ste_base = obj_id;
-               rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, true);
-
-               obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
-               rtc_attr.stc_base = obj_id;
-               hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, true);
-
-               ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
-               if (ret) {
-                       mlx5hws_err(ctx, "Failed to create peer matcher RTC of type %s",
-                                   hws_matcher_rtc_type_to_str(rtc_type));
-                       goto destroy_rtc_0;
-               }
-       }
-
-       return 0;
-
-destroy_rtc_0:
-       mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id);
-free_ste:
-       if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
-               mlx5hws_pool_chunk_free(ste_pool, ste);
-       return ret;
-}
-
-static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher,
-                                   enum mlx5hws_matcher_rtc_type rtc_type,
-                                   u8 action_ste_selector)
-{
-       struct mlx5hws_matcher_action_ste *action_ste;
-       struct mlx5hws_table *tbl = matcher->tbl;
-       struct mlx5hws_pool_chunk *ste;
-       struct mlx5hws_pool *ste_pool;
-       u32 rtc_0_id, rtc_1_id;
-
-       switch (rtc_type) {
-       case HWS_MATCHER_RTC_TYPE_MATCH:
-               rtc_0_id = matcher->match_ste.rtc_0_id;
-               rtc_1_id = matcher->match_ste.rtc_1_id;
-               ste_pool = matcher->match_ste.pool;
-               ste = &matcher->match_ste.ste;
-               break;
-       case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
-               action_ste = &matcher->action_ste[action_ste_selector];
-               rtc_0_id = action_ste->rtc_0_id;
-               rtc_1_id = action_ste->rtc_1_id;
-               ste_pool = action_ste->pool;
-               ste = &action_ste->ste;
-               break;
-       default:
-               return;
-       }
-
-       if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
-               mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_1_id);
-
-       mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_0_id);
-       if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
-               mlx5hws_pool_chunk_free(ste_pool, ste);
-}
-
-static int
-hws_matcher_check_attr_sz(struct mlx5hws_cmd_query_caps *caps,
-                         struct mlx5hws_matcher *matcher)
-{
-       struct mlx5hws_matcher_attr *attr = &matcher->attr;
-
-       if (attr->table.sz_col_log > caps->rtc_log_depth_max) {
-               mlx5hws_err(matcher->tbl->ctx, "Matcher depth exceeds limit %d\n",
-                           caps->rtc_log_depth_max);
-               return -EOPNOTSUPP;
-       }
-
-       if (attr->table.sz_col_log + attr->table.sz_row_log > caps->ste_alloc_log_max) {
-               mlx5hws_err(matcher->tbl->ctx, "Total matcher size exceeds limit %d\n",
-                           caps->ste_alloc_log_max);
-               return -EOPNOTSUPP;
-       }
-
-       if (attr->table.sz_col_log + attr->table.sz_row_log < caps->ste_alloc_log_gran) {
-               mlx5hws_err(matcher->tbl->ctx, "Total matcher size below limit %d\n",
-                           caps->ste_alloc_log_gran);
-               return -EOPNOTSUPP;
-       }
-
-       return 0;
-}
-
-static void hws_matcher_set_pool_attr(struct mlx5hws_pool_attr *attr,
-                                     struct mlx5hws_matcher *matcher)
-{
-       switch (matcher->attr.optimize_flow_src) {
-       case MLX5HWS_MATCHER_FLOW_SRC_VPORT:
-               attr->opt_type = MLX5HWS_POOL_OPTIMIZE_ORIG;
-               break;
-       case MLX5HWS_MATCHER_FLOW_SRC_WIRE:
-               attr->opt_type = MLX5HWS_POOL_OPTIMIZE_MIRROR;
-               break;
-       default:
-               break;
-       }
-}
-
-static int hws_matcher_check_and_process_at(struct mlx5hws_matcher *matcher,
-                                           struct mlx5hws_action_template *at)
-{
-       struct mlx5hws_context *ctx = matcher->tbl->ctx;
-       bool valid;
-       int ret;
-
-       valid = mlx5hws_action_check_combo(ctx, at->action_type_arr, matcher->tbl->type);
-       if (!valid) {
-               mlx5hws_err(ctx, "Invalid combination in action template\n");
-               return -EINVAL;
-       }
-
-       /* Process action template to setters */
-       ret = mlx5hws_action_template_process(at);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to process action template\n");
-               return ret;
-       }
-
-       return 0;
-}
-
-static int hws_matcher_resize_init(struct mlx5hws_matcher *src_matcher)
-{
-       struct mlx5hws_matcher_resize_data *resize_data;
-
-       resize_data = kzalloc(sizeof(*resize_data), GFP_KERNEL);
-       if (!resize_data)
-               return -ENOMEM;
-
-       resize_data->max_stes = src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
-
-       resize_data->action_ste[0].stc = src_matcher->action_ste[0].stc;
-       resize_data->action_ste[0].rtc_0_id = src_matcher->action_ste[0].rtc_0_id;
-       resize_data->action_ste[0].rtc_1_id = src_matcher->action_ste[0].rtc_1_id;
-       resize_data->action_ste[0].pool = src_matcher->action_ste[0].max_stes ?
-                                         src_matcher->action_ste[0].pool :
-                                         NULL;
-       resize_data->action_ste[1].stc = src_matcher->action_ste[1].stc;
-       resize_data->action_ste[1].rtc_0_id = src_matcher->action_ste[1].rtc_0_id;
-       resize_data->action_ste[1].rtc_1_id = src_matcher->action_ste[1].rtc_1_id;
-       resize_data->action_ste[1].pool = src_matcher->action_ste[1].max_stes ?
-                                         src_matcher->action_ste[1].pool :
-                                          NULL;
-
-       /* Place the new resized matcher on the dst matcher's list */
-       list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
-
-       /* Move all the previous resized matchers to the dst matcher's list */
-       while (!list_empty(&src_matcher->resize_data)) {
-               resize_data = list_first_entry(&src_matcher->resize_data,
-                                              struct mlx5hws_matcher_resize_data,
-                                              list_node);
-               list_del_init(&resize_data->list_node);
-               list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
-       }
-
-       return 0;
-}
-
-static void hws_matcher_resize_uninit(struct mlx5hws_matcher *matcher)
-{
-       struct mlx5hws_matcher_resize_data *resize_data;
-
-       if (!mlx5hws_matcher_is_resizable(matcher))
-               return;
-
-       while (!list_empty(&matcher->resize_data)) {
-               resize_data = list_first_entry(&matcher->resize_data,
-                                              struct mlx5hws_matcher_resize_data,
-                                              list_node);
-               list_del_init(&resize_data->list_node);
-
-               if (resize_data->max_stes) {
-                       mlx5hws_action_free_single_stc(matcher->tbl->ctx,
-                                                      matcher->tbl->type,
-                                                      &resize_data->action_ste[1].stc);
-                       mlx5hws_action_free_single_stc(matcher->tbl->ctx,
-                                                      matcher->tbl->type,
-                                                      &resize_data->action_ste[0].stc);
-
-                       if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
-                               mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
-                                                       resize_data->action_ste[1].rtc_1_id);
-                               mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
-                                                       resize_data->action_ste[0].rtc_1_id);
-                       }
-                       mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
-                                               resize_data->action_ste[1].rtc_0_id);
-                       mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
-                                               resize_data->action_ste[0].rtc_0_id);
-                       if (resize_data->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].pool) {
-                               mlx5hws_pool_destroy(resize_data->action_ste[1].pool);
-                               mlx5hws_pool_destroy(resize_data->action_ste[0].pool);
-                       }
-               }
-
-               kfree(resize_data);
-       }
-}
-
-static int
-hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
-{
-       struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
-       struct mlx5hws_matcher_action_ste *action_ste;
-       struct mlx5hws_table *tbl = matcher->tbl;
-       struct mlx5hws_pool_attr pool_attr = {0};
-       struct mlx5hws_context *ctx = tbl->ctx;
-       int ret;
-
-       action_ste = &matcher->action_ste[action_ste_selector];
-
-       /* Allocate action STE mempool */
-       pool_attr.table_type = tbl->type;
-       pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
-       pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
-       pool_attr.alloc_log_sz = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
-                                matcher->attr.table.sz_row_log;
-       hws_matcher_set_pool_attr(&pool_attr, matcher);
-       action_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
-       if (!action_ste->pool) {
-               mlx5hws_err(ctx, "Failed to create action ste pool\n");
-               return -EINVAL;
-       }
-
-       /* Allocate action RTC */
-       ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to create action RTC\n");
-               goto free_ste_pool;
-       }
-
-       /* Allocate STC for jumps to STE */
-       stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
-       stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
-       stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
-       stc_attr.ste_table.ste = action_ste->ste;
-       stc_attr.ste_table.ste_pool = action_ste->pool;
-       stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
-
-       ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl->type,
-                                             &action_ste->stc);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to create action jump to table STC\n");
-               goto free_rtc;
-       }
-
-       return 0;
-
-free_rtc:
-       hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
-free_ste_pool:
-       mlx5hws_pool_destroy(action_ste->pool);
-       return ret;
-}
-
-static void hws_matcher_unbind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
-{
-       struct mlx5hws_matcher_action_ste *action_ste;
-       struct mlx5hws_table *tbl = matcher->tbl;
-
-       action_ste = &matcher->action_ste[action_ste_selector];
-
-       if (!action_ste->max_stes ||
-           matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION ||
-           mlx5hws_matcher_is_in_resize(matcher))
-               return;
-
-       mlx5hws_action_free_single_stc(tbl->ctx, tbl->type, &action_ste->stc);
-       hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
-       mlx5hws_pool_destroy(action_ste->pool);
-}
-
-static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
-{
-       bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
-       struct mlx5hws_table *tbl = matcher->tbl;
-       struct mlx5hws_context *ctx = tbl->ctx;
-       u32 required_stes;
-       u8 max_stes = 0;
-       int i, ret;
-
-       if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)
-               return 0;
-
-       for (i = 0; i < matcher->num_of_at; i++) {
-               struct mlx5hws_action_template *at = &matcher->at[i];
-
-               ret = hws_matcher_check_and_process_at(matcher, at);
-               if (ret) {
-                       mlx5hws_err(ctx, "Invalid at %d", i);
-                       return ret;
-               }
-
-               required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
-               max_stes = max(max_stes, required_stes);
-
-               /* Future: Optimize reparse */
-       }
-
-       /* There are no additional STEs required for matcher */
-       if (!max_stes)
-               return 0;
-
-       matcher->action_ste[0].max_stes = max_stes;
-       matcher->action_ste[1].max_stes = max_stes;
-
-       ret = hws_matcher_bind_at_idx(matcher, 0);
-       if (ret)
-               return ret;
-
-       ret = hws_matcher_bind_at_idx(matcher, 1);
-       if (ret)
-               goto free_at_0;
-
-       return 0;
-
-free_at_0:
-       hws_matcher_unbind_at_idx(matcher, 0);
-       return ret;
-}
-
-static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher)
-{
-       hws_matcher_unbind_at_idx(matcher, 1);
-       hws_matcher_unbind_at_idx(matcher, 0);
-}
-
-static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
-{
-       struct mlx5hws_context *ctx = matcher->tbl->ctx;
-       struct mlx5hws_pool_attr pool_attr = {0};
-       int ret;
-
-       /* Calculate match, range and hash definers */
-       if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)) {
-               ret = mlx5hws_definer_mt_init(ctx, matcher->mt);
-               if (ret) {
-                       if (ret == -E2BIG)
-                               mlx5hws_err(ctx, "Failed to set matcher templates with match definers\n");
-                       return ret;
-               }
-       }
-
-       /* Create an STE pool per matcher*/
-       pool_attr.table_type = matcher->tbl->type;
-       pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
-       pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL;
-       pool_attr.alloc_log_sz = matcher->attr.table.sz_col_log +
-                                matcher->attr.table.sz_row_log;
-       hws_matcher_set_pool_attr(&pool_attr, matcher);
-
-       matcher->match_ste.pool = mlx5hws_pool_create(ctx, &pool_attr);
-       if (!matcher->match_ste.pool) {
-               mlx5hws_err(ctx, "Failed to allocate matcher STE pool\n");
-               ret = -EOPNOTSUPP;
-               goto uninit_match_definer;
-       }
-
-       return 0;
-
-uninit_match_definer:
-       if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
-               mlx5hws_definer_mt_uninit(ctx, matcher->mt);
-       return ret;
-}
-
-static void hws_matcher_unbind_mt(struct mlx5hws_matcher *matcher)
-{
-       mlx5hws_pool_destroy(matcher->match_ste.pool);
-       if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
-               mlx5hws_definer_mt_uninit(matcher->tbl->ctx, matcher->mt);
-}
-
-static int
-hws_matcher_validate_insert_mode(struct mlx5hws_cmd_query_caps *caps,
-                                struct mlx5hws_matcher *matcher)
-{
-       struct mlx5hws_matcher_attr *attr = &matcher->attr;
-       struct mlx5hws_context *ctx = matcher->tbl->ctx;
-
-       switch (attr->insert_mode) {
-       case MLX5HWS_MATCHER_INSERT_BY_HASH:
-               if (matcher->attr.distribute_mode != MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
-                       mlx5hws_err(ctx, "Invalid matcher distribute mode\n");
-                       return -EOPNOTSUPP;
-               }
-               break;
-
-       case MLX5HWS_MATCHER_INSERT_BY_INDEX:
-               if (attr->table.sz_col_log) {
-                       mlx5hws_err(ctx, "Matcher with INSERT_BY_INDEX supports only Nx1 table size\n");
-                       return -EOPNOTSUPP;
-               }
-
-               if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
-                       /* Hash Split Table */
-                       if (!caps->rtc_hash_split_table) {
-                               mlx5hws_err(ctx, "FW doesn't support insert by index and hash distribute\n");
-                               return -EOPNOTSUPP;
-                       }
-               } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
-                       /* Linear Lookup Table */
-                       if (!caps->rtc_linear_lookup_table ||
-                           !IS_BIT_SET(caps->access_index_mode,
-                                       MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR)) {
-                               mlx5hws_err(ctx, "FW doesn't support insert by index and linear distribute\n");
-                               return -EOPNOTSUPP;
-                       }
-
-                       if (attr->table.sz_row_log > MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX) {
-                               mlx5hws_err(ctx, "Matcher with linear distribute: rows exceed limit %d",
-                                           MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX);
-                               return -EOPNOTSUPP;
-                       }
-               } else {
-                       mlx5hws_err(ctx, "Matcher has unsupported distribute mode\n");
-                       return -EOPNOTSUPP;
-               }
-               break;
-
-       default:
-               mlx5hws_err(ctx, "Matcher has unsupported insert mode\n");
-               return -EOPNOTSUPP;
-       }
-
-       return 0;
-}
-
-static int
-hws_matcher_process_attr(struct mlx5hws_cmd_query_caps *caps,
-                        struct mlx5hws_matcher *matcher)
-{
-       struct mlx5hws_matcher_attr *attr = &matcher->attr;
-
-       if (hws_matcher_validate_insert_mode(caps, matcher))
-               return -EOPNOTSUPP;
-
-       if (matcher->tbl->type != MLX5HWS_TABLE_TYPE_FDB && attr->optimize_flow_src) {
-               mlx5hws_err(matcher->tbl->ctx, "NIC domain doesn't support flow_src\n");
-               return -EOPNOTSUPP;
-       }
-
-       /* Convert number of rules to the required depth */
-       if (attr->mode == MLX5HWS_MATCHER_RESOURCE_MODE_RULE &&
-           attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH)
-               attr->table.sz_col_log = hws_matcher_rules_to_tbl_depth(attr->rule.num_log);
-
-       matcher->flags |= attr->resizable ? MLX5HWS_MATCHER_FLAGS_RESIZABLE : 0;
-
-       return hws_matcher_check_attr_sz(caps, matcher);
-}
-
-static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
-{
-       int ret;
-
-       /* Select and create the definers for current matcher */
-       ret = hws_matcher_bind_mt(matcher);
-       if (ret)
-               return ret;
-
-       /* Calculate and verify action combination */
-       ret = hws_matcher_bind_at(matcher);
-       if (ret)
-               goto unbind_mt;
-
-       /* Create matcher end flow table anchor */
-       ret = hws_matcher_create_end_ft(matcher);
-       if (ret)
-               goto unbind_at;
-
-       /* Allocate the RTC for the new matcher */
-       ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
-       if (ret)
-               goto destroy_end_ft;
-
-       /* Connect the matcher to the matcher list */
-       ret = hws_matcher_connect(matcher);
-       if (ret)
-               goto destroy_rtc;
-
-       return 0;
-
-destroy_rtc:
-       hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
-destroy_end_ft:
-       hws_matcher_destroy_end_ft(matcher);
-unbind_at:
-       hws_matcher_unbind_at(matcher);
-unbind_mt:
-       hws_matcher_unbind_mt(matcher);
-       return ret;
-}
-
-static void hws_matcher_destroy_and_disconnect(struct mlx5hws_matcher *matcher)
-{
-       hws_matcher_resize_uninit(matcher);
-       hws_matcher_disconnect(matcher);
-       hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
-       hws_matcher_destroy_end_ft(matcher);
-       hws_matcher_unbind_at(matcher);
-       hws_matcher_unbind_mt(matcher);
-}
-
-static int
-hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher)
-{
-       struct mlx5hws_context *ctx = matcher->tbl->ctx;
-       struct mlx5hws_matcher *col_matcher;
-       int ret;
-
-       if (matcher->attr.mode != MLX5HWS_MATCHER_RESOURCE_MODE_RULE ||
-           matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX)
-               return 0;
-
-       if (!hws_matcher_requires_col_tbl(matcher->attr.rule.num_log))
-               return 0;
-
-       col_matcher = kzalloc(sizeof(*matcher), GFP_KERNEL);
-       if (!col_matcher)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&col_matcher->resize_data);
-
-       col_matcher->tbl = matcher->tbl;
-       col_matcher->mt = matcher->mt;
-       col_matcher->at = matcher->at;
-       col_matcher->num_of_at = matcher->num_of_at;
-       col_matcher->num_of_mt = matcher->num_of_mt;
-       col_matcher->attr.priority = matcher->attr.priority;
-       col_matcher->flags = matcher->flags;
-       col_matcher->flags |= MLX5HWS_MATCHER_FLAGS_COLLISION;
-       col_matcher->attr.mode = MLX5HWS_MATCHER_RESOURCE_MODE_HTABLE;
-       col_matcher->attr.optimize_flow_src = matcher->attr.optimize_flow_src;
-       col_matcher->attr.table.sz_row_log = matcher->attr.rule.num_log;
-       col_matcher->attr.table.sz_col_log = MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH;
-       if (col_matcher->attr.table.sz_row_log > MLX5HWS_MATCHER_ASSURED_ROW_RATIO)
-               col_matcher->attr.table.sz_row_log -= MLX5HWS_MATCHER_ASSURED_ROW_RATIO;
-
-       col_matcher->attr.max_num_of_at_attach = matcher->attr.max_num_of_at_attach;
-
-       ret = hws_matcher_process_attr(ctx->caps, col_matcher);
-       if (ret)
-               goto free_col_matcher;
-
-       ret = hws_matcher_create_and_connect(col_matcher);
-       if (ret)
-               goto free_col_matcher;
-
-       matcher->col_matcher = col_matcher;
-
-       return 0;
-
-free_col_matcher:
-       kfree(col_matcher);
-       mlx5hws_err(ctx, "Failed to create assured collision matcher\n");
-       return ret;
-}
-
-static void
-hws_matcher_destroy_col_matcher(struct mlx5hws_matcher *matcher)
-{
-       if (matcher->attr.mode != MLX5HWS_MATCHER_RESOURCE_MODE_RULE ||
-           matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX)
-               return;
-
-       if (matcher->col_matcher) {
-               hws_matcher_destroy_and_disconnect(matcher->col_matcher);
-               kfree(matcher->col_matcher);
-       }
-}
-
-static int hws_matcher_init(struct mlx5hws_matcher *matcher)
-{
-       struct mlx5hws_context *ctx = matcher->tbl->ctx;
-       int ret;
-
-       INIT_LIST_HEAD(&matcher->resize_data);
-
-       mutex_lock(&ctx->ctrl_lock);
-
-       /* Allocate matcher resource and connect to the packet pipe */
-       ret = hws_matcher_create_and_connect(matcher);
-       if (ret)
-               goto unlock_err;
-
-       /* Create additional matcher for collision handling */
-       ret = hws_matcher_create_col_matcher(matcher);
-       if (ret)
-               goto destory_and_disconnect;
-       mutex_unlock(&ctx->ctrl_lock);
-
-       return 0;
-
-destory_and_disconnect:
-       hws_matcher_destroy_and_disconnect(matcher);
-unlock_err:
-       mutex_unlock(&ctx->ctrl_lock);
-       return ret;
-}
-
-static int hws_matcher_uninit(struct mlx5hws_matcher *matcher)
-{
-       struct mlx5hws_context *ctx = matcher->tbl->ctx;
-
-       mutex_lock(&ctx->ctrl_lock);
-       hws_matcher_destroy_col_matcher(matcher);
-       hws_matcher_destroy_and_disconnect(matcher);
-       mutex_unlock(&ctx->ctrl_lock);
-
-       return 0;
-}
-
-int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
-                             struct mlx5hws_action_template *at)
-{
-       bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
-       struct mlx5hws_context *ctx = matcher->tbl->ctx;
-       u32 required_stes;
-       int ret;
-
-       if (!matcher->attr.max_num_of_at_attach) {
-               mlx5hws_dbg(ctx, "Num of current at (%d) exceed allowed value\n",
-                           matcher->num_of_at);
-               return -EOPNOTSUPP;
-       }
-
-       ret = hws_matcher_check_and_process_at(matcher, at);
-       if (ret)
-               return ret;
-
-       required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
-       if (matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes < required_stes) {
-               mlx5hws_dbg(ctx, "Required STEs [%d] exceeds initial action template STE [%d]\n",
-                           required_stes,
-                           matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes);
-               return -ENOMEM;
-       }
-
-       matcher->at[matcher->num_of_at] = *at;
-       matcher->num_of_at += 1;
-       matcher->attr.max_num_of_at_attach -= 1;
-
-       if (matcher->col_matcher)
-               matcher->col_matcher->num_of_at = matcher->num_of_at;
-
-       return 0;
-}
-
-static int
-hws_matcher_set_templates(struct mlx5hws_matcher *matcher,
-                         struct mlx5hws_match_template *mt[],
-                         u8 num_of_mt,
-                         struct mlx5hws_action_template *at[],
-                         u8 num_of_at)
-{
-       struct mlx5hws_context *ctx = matcher->tbl->ctx;
-       int ret = 0;
-       int i;
-
-       if (!num_of_mt || !num_of_at) {
-               mlx5hws_err(ctx, "Number of action/match template cannot be zero\n");
-               return -EOPNOTSUPP;
-       }
-
-       matcher->mt = kcalloc(num_of_mt, sizeof(*matcher->mt), GFP_KERNEL);
-       if (!matcher->mt)
-               return -ENOMEM;
-
-       matcher->at = kcalloc(num_of_at + matcher->attr.max_num_of_at_attach,
-                             sizeof(*matcher->at),
-                             GFP_KERNEL);
-       if (!matcher->at) {
-               mlx5hws_err(ctx, "Failed to allocate action template array\n");
-               ret = -ENOMEM;
-               goto free_mt;
-       }
-
-       for (i = 0; i < num_of_mt; i++)
-               matcher->mt[i] = *mt[i];
-
-       for (i = 0; i < num_of_at; i++)
-               matcher->at[i] = *at[i];
-
-       matcher->num_of_mt = num_of_mt;
-       matcher->num_of_at = num_of_at;
-
-       return 0;
-
-free_mt:
-       kfree(matcher->mt);
-       return ret;
-}
-
-static void
-hws_matcher_unset_templates(struct mlx5hws_matcher *matcher)
-{
-       kfree(matcher->at);
-       kfree(matcher->mt);
-}
-
-struct mlx5hws_matcher *
-mlx5hws_matcher_create(struct mlx5hws_table *tbl,
-                      struct mlx5hws_match_template *mt[],
-                      u8 num_of_mt,
-                      struct mlx5hws_action_template *at[],
-                      u8 num_of_at,
-                      struct mlx5hws_matcher_attr *attr)
-{
-       struct mlx5hws_context *ctx = tbl->ctx;
-       struct mlx5hws_matcher *matcher;
-       int ret;
-
-       matcher = kzalloc(sizeof(*matcher), GFP_KERNEL);
-       if (!matcher)
-               return NULL;
-
-       matcher->tbl = tbl;
-       matcher->attr = *attr;
-
-       ret = hws_matcher_process_attr(tbl->ctx->caps, matcher);
-       if (ret)
-               goto free_matcher;
-
-       ret = hws_matcher_set_templates(matcher, mt, num_of_mt, at, num_of_at);
-       if (ret)
-               goto free_matcher;
-
-       ret = hws_matcher_init(matcher);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to initialise matcher: %d\n", ret);
-               goto unset_templates;
-       }
-
-       return matcher;
-
-unset_templates:
-       hws_matcher_unset_templates(matcher);
-free_matcher:
-       kfree(matcher);
-       return NULL;
-}
-
-int mlx5hws_matcher_destroy(struct mlx5hws_matcher *matcher)
-{
-       hws_matcher_uninit(matcher);
-       hws_matcher_unset_templates(matcher);
-       kfree(matcher);
-       return 0;
-}
-
-struct mlx5hws_match_template *
-mlx5hws_match_template_create(struct mlx5hws_context *ctx,
-                             u32 *match_param,
-                             u32 match_param_sz,
-                             u8 match_criteria_enable)
-{
-       struct mlx5hws_match_template *mt;
-
-       mt = kzalloc(sizeof(*mt), GFP_KERNEL);
-       if (!mt)
-               return NULL;
-
-       mt->match_param = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-       if (!mt->match_param)
-               goto free_template;
-
-       memcpy(mt->match_param, match_param, match_param_sz);
-       mt->match_criteria_enable = match_criteria_enable;
-
-       return mt;
-
-free_template:
-       kfree(mt);
-       return NULL;
-}
-
-int mlx5hws_match_template_destroy(struct mlx5hws_match_template *mt)
-{
-       kfree(mt->match_param);
-       kfree(mt);
-       return 0;
-}
-
-static int hws_matcher_resize_precheck(struct mlx5hws_matcher *src_matcher,
-                                      struct mlx5hws_matcher *dst_matcher)
-{
-       struct mlx5hws_context *ctx = src_matcher->tbl->ctx;
-       int i;
-
-       if (src_matcher->tbl->type != dst_matcher->tbl->type) {
-               mlx5hws_err(ctx, "Table type mismatch for src/dst matchers\n");
-               return -EINVAL;
-       }
-
-       if (!mlx5hws_matcher_is_resizable(src_matcher) ||
-           !mlx5hws_matcher_is_resizable(dst_matcher)) {
-               mlx5hws_err(ctx, "Src/dst matcher is not resizable\n");
-               return -EINVAL;
-       }
-
-       if (mlx5hws_matcher_is_insert_by_idx(src_matcher) !=
-           mlx5hws_matcher_is_insert_by_idx(dst_matcher)) {
-               mlx5hws_err(ctx, "Src/dst matchers insert mode mismatch\n");
-               return -EINVAL;
-       }
-
-       if (mlx5hws_matcher_is_in_resize(src_matcher) ||
-           mlx5hws_matcher_is_in_resize(dst_matcher)) {
-               mlx5hws_err(ctx, "Src/dst matcher is already in resize\n");
-               return -EINVAL;
-       }
-
-       /* Compare match templates - make sure the definers are equivalent */
-       if (src_matcher->num_of_mt != dst_matcher->num_of_mt) {
-               mlx5hws_err(ctx, "Src/dst matcher match templates mismatch\n");
-               return -EINVAL;
-       }
-
-       if (src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes >
-           dst_matcher->action_ste[0].max_stes) {
-               mlx5hws_err(ctx, "Src/dst matcher max STEs mismatch\n");
-               return -EINVAL;
-       }
-
-       for (i = 0; i < src_matcher->num_of_mt; i++) {
-               if (mlx5hws_definer_compare(src_matcher->mt[i].definer,
-                                           dst_matcher->mt[i].definer)) {
-                       mlx5hws_err(ctx, "Src/dst matcher definers mismatch\n");
-                       return -EINVAL;
-               }
-       }
-
-       return 0;
-}
-
-int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher,
-                                     struct mlx5hws_matcher *dst_matcher)
-{
-       int ret = 0;
-
-       mutex_lock(&src_matcher->tbl->ctx->ctrl_lock);
-
-       ret = hws_matcher_resize_precheck(src_matcher, dst_matcher);
-       if (ret)
-               goto out;
-
-       src_matcher->resize_dst = dst_matcher;
-
-       ret = hws_matcher_resize_init(src_matcher);
-       if (ret)
-               src_matcher->resize_dst = NULL;
-
-out:
-       mutex_unlock(&src_matcher->tbl->ctx->ctrl_lock);
-       return ret;
-}
-
-int mlx5hws_matcher_resize_rule_move(struct mlx5hws_matcher *src_matcher,
-                                    struct mlx5hws_rule *rule,
-                                    struct mlx5hws_rule_attr *attr)
-{
-       struct mlx5hws_context *ctx = src_matcher->tbl->ctx;
-
-       if (unlikely(!mlx5hws_matcher_is_in_resize(src_matcher))) {
-               mlx5hws_err(ctx, "Matcher is not resizable or not in resize\n");
-               return -EINVAL;
-       }
-
-       if (unlikely(src_matcher != rule->matcher)) {
-               mlx5hws_err(ctx, "Rule doesn't belong to src matcher\n");
-               return -EINVAL;
-       }
-
-       return mlx5hws_rule_move_hws_add(rule, attr);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h
deleted file mode 100644 (file)
index 125391d..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#ifndef MLX5HWS_MATCHER_H_
-#define MLX5HWS_MATCHER_H_
-
-/* We calculated that concatenating a collision table to the main table with
- * 3% of the main table rows will be enough resources for high insertion
- * success probability.
- *
- * The calculation: log2(2^x * 3 / 100) = log2(2^x) + log2(3/100) = x - 5.05 ~ 5
- */
-#define MLX5HWS_MATCHER_ASSURED_ROW_RATIO 5
-/* Threshold to determine if amount of rules require a collision table */
-#define MLX5HWS_MATCHER_ASSURED_RULES_TH 10
-/* Required depth of an assured collision table */
-#define MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH 4
-/* Required depth of the main large table */
-#define MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH 2
-
-enum mlx5hws_matcher_offset {
-       MLX5HWS_MATCHER_OFFSET_TAG_DW1 = 12,
-       MLX5HWS_MATCHER_OFFSET_TAG_DW0 = 13,
-};
-
-enum mlx5hws_matcher_flags {
-       MLX5HWS_MATCHER_FLAGS_COLLISION = 1 << 2,
-       MLX5HWS_MATCHER_FLAGS_RESIZABLE = 1 << 3,
-};
-
-struct mlx5hws_match_template {
-       struct mlx5hws_definer *definer;
-       struct mlx5hws_definer_fc *fc;
-       u32 *match_param;
-       u8 match_criteria_enable;
-       u16 fc_sz;
-};
-
-struct mlx5hws_matcher_match_ste {
-       struct mlx5hws_pool_chunk ste;
-       u32 rtc_0_id;
-       u32 rtc_1_id;
-       struct mlx5hws_pool *pool;
-};
-
-struct mlx5hws_matcher_action_ste {
-       struct mlx5hws_pool_chunk ste;
-       struct mlx5hws_pool_chunk stc;
-       u32 rtc_0_id;
-       u32 rtc_1_id;
-       struct mlx5hws_pool *pool;
-       u8 max_stes;
-};
-
-struct mlx5hws_matcher_resize_data_node {
-       struct mlx5hws_pool_chunk stc;
-       u32 rtc_0_id;
-       u32 rtc_1_id;
-       struct mlx5hws_pool *pool;
-};
-
-struct mlx5hws_matcher_resize_data {
-       struct mlx5hws_matcher_resize_data_node action_ste[2];
-       u8 max_stes;
-       struct list_head list_node;
-};
-
-struct mlx5hws_matcher {
-       struct mlx5hws_table *tbl;
-       struct mlx5hws_matcher_attr attr;
-       struct mlx5hws_match_template *mt;
-       struct mlx5hws_action_template *at;
-       u8 num_of_at;
-       u8 num_of_mt;
-       /* enum mlx5hws_matcher_flags */
-       u8 flags;
-       u32 end_ft_id;
-       struct mlx5hws_matcher *col_matcher;
-       struct mlx5hws_matcher *resize_dst;
-       struct mlx5hws_matcher_match_ste match_ste;
-       struct mlx5hws_matcher_action_ste action_ste[2];
-       struct list_head list_node;
-       struct list_head resize_data;
-};
-
-static inline bool
-mlx5hws_matcher_mt_is_jumbo(struct mlx5hws_match_template *mt)
-{
-       return mlx5hws_definer_is_jumbo(mt->definer);
-}
-
-static inline bool mlx5hws_matcher_is_resizable(struct mlx5hws_matcher *matcher)
-{
-       return !!(matcher->flags & MLX5HWS_MATCHER_FLAGS_RESIZABLE);
-}
-
-static inline bool mlx5hws_matcher_is_in_resize(struct mlx5hws_matcher *matcher)
-{
-       return !!matcher->resize_dst;
-}
-
-static inline bool mlx5hws_matcher_is_insert_by_idx(struct mlx5hws_matcher *matcher)
-{
-       return matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX;
-}
-
-#endif /* MLX5HWS_MATCHER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c
deleted file mode 100644 (file)
index e084a5c..0000000
+++ /dev/null
@@ -1,579 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#include "mlx5hws_internal.h"
-
-enum mlx5hws_arg_chunk_size
-mlx5hws_arg_data_size_to_arg_log_size(u16 data_size)
-{
-       /* Return the roundup of log2(data_size) */
-       if (data_size <= MLX5HWS_ARG_DATA_SIZE)
-               return MLX5HWS_ARG_CHUNK_SIZE_1;
-       if (data_size <= MLX5HWS_ARG_DATA_SIZE * 2)
-               return MLX5HWS_ARG_CHUNK_SIZE_2;
-       if (data_size <= MLX5HWS_ARG_DATA_SIZE * 4)
-               return MLX5HWS_ARG_CHUNK_SIZE_3;
-       if (data_size <= MLX5HWS_ARG_DATA_SIZE * 8)
-               return MLX5HWS_ARG_CHUNK_SIZE_4;
-
-       return MLX5HWS_ARG_CHUNK_SIZE_MAX;
-}
-
-u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size)
-{
-       return BIT(mlx5hws_arg_data_size_to_arg_log_size(data_size));
-}
-
-enum mlx5hws_arg_chunk_size
-mlx5hws_arg_get_arg_log_size(u16 num_of_actions)
-{
-       return mlx5hws_arg_data_size_to_arg_log_size(num_of_actions *
-                                                   MLX5HWS_MODIFY_ACTION_SIZE);
-}
-
-u32 mlx5hws_arg_get_arg_size(u16 num_of_actions)
-{
-       return BIT(mlx5hws_arg_get_arg_log_size(num_of_actions));
-}
-
-bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions)
-{
-       u16 i, field;
-       u8 action_id;
-
-       for (i = 0; i < num_of_actions; i++) {
-               action_id = MLX5_GET(set_action_in, &actions[i], action_type);
-
-               switch (action_id) {
-               case MLX5_MODIFICATION_TYPE_NOP:
-                       field = MLX5_MODI_OUT_NONE;
-                       break;
-
-               case MLX5_MODIFICATION_TYPE_SET:
-               case MLX5_MODIFICATION_TYPE_ADD:
-                       field = MLX5_GET(set_action_in, &actions[i], field);
-                       break;
-
-               case MLX5_MODIFICATION_TYPE_COPY:
-               case MLX5_MODIFICATION_TYPE_ADD_FIELD:
-                       field = MLX5_GET(copy_action_in, &actions[i], dst_field);
-                       break;
-
-               default:
-                       /* Insert/Remove/Unknown actions require reparse */
-                       return true;
-               }
-
-               /* Below fields can change packet structure require a reparse */
-               if (field == MLX5_MODI_OUT_ETHERTYPE ||
-                   field == MLX5_MODI_OUT_IPV6_NEXT_HDR)
-                       return true;
-       }
-
-       return false;
-}
-
-/* Cache and cache element handling */
-int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache)
-{
-       struct mlx5hws_pattern_cache *new_cache;
-
-       new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL);
-       if (!new_cache)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&new_cache->ptrn_list);
-       mutex_init(&new_cache->lock);
-
-       *cache = new_cache;
-
-       return 0;
-}
-
-void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache)
-{
-       mutex_destroy(&cache->lock);
-       kfree(cache);
-}
-
-static bool mlx5hws_pat_compare_pattern(int cur_num_of_actions,
-                                       __be64 cur_actions[],
-                                       int num_of_actions,
-                                       __be64 actions[])
-{
-       int i;
-
-       if (cur_num_of_actions != num_of_actions)
-               return false;
-
-       for (i = 0; i < num_of_actions; i++) {
-               u8 action_id =
-                       MLX5_GET(set_action_in, &actions[i], action_type);
-
-               if (action_id == MLX5_MODIFICATION_TYPE_COPY ||
-                   action_id == MLX5_MODIFICATION_TYPE_ADD_FIELD) {
-                       if (actions[i] != cur_actions[i])
-                               return false;
-               } else {
-                       /* Compare just the control, not the values */
-                       if ((__force __be32)actions[i] !=
-                           (__force __be32)cur_actions[i])
-                               return false;
-               }
-       }
-
-       return true;
-}
-
-static struct mlx5hws_pattern_cache_item *
-mlx5hws_pat_find_cached_pattern(struct mlx5hws_pattern_cache *cache,
-                               u16 num_of_actions,
-                               __be64 *actions)
-{
-       struct mlx5hws_pattern_cache_item *cached_pat = NULL;
-
-       list_for_each_entry(cached_pat, &cache->ptrn_list, ptrn_list_node) {
-               if (mlx5hws_pat_compare_pattern(cached_pat->mh_data.num_of_actions,
-                                               (__be64 *)cached_pat->mh_data.data,
-                                               num_of_actions,
-                                               actions))
-                       return cached_pat;
-       }
-
-       return NULL;
-}
-
-static struct mlx5hws_pattern_cache_item *
-mlx5hws_pat_get_existing_cached_pattern(struct mlx5hws_pattern_cache *cache,
-                                       u16 num_of_actions,
-                                       __be64 *actions)
-{
-       struct mlx5hws_pattern_cache_item *cached_pattern;
-
-       cached_pattern = mlx5hws_pat_find_cached_pattern(cache, num_of_actions, actions);
-       if (cached_pattern) {
-               /* LRU: move it to be first in the list */
-               list_del_init(&cached_pattern->ptrn_list_node);
-               list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
-               cached_pattern->refcount++;
-       }
-
-       return cached_pattern;
-}
-
-static struct mlx5hws_pattern_cache_item *
-mlx5hws_pat_add_pattern_to_cache(struct mlx5hws_pattern_cache *cache,
-                                u32 pattern_id,
-                                u16 num_of_actions,
-                                __be64 *actions)
-{
-       struct mlx5hws_pattern_cache_item *cached_pattern;
-
-       cached_pattern = kzalloc(sizeof(*cached_pattern), GFP_KERNEL);
-       if (!cached_pattern)
-               return NULL;
-
-       cached_pattern->mh_data.num_of_actions = num_of_actions;
-       cached_pattern->mh_data.pattern_id = pattern_id;
-       cached_pattern->mh_data.data =
-               kmemdup(actions, num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
-       if (!cached_pattern->mh_data.data)
-               goto free_cached_obj;
-
-       list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
-       cached_pattern->refcount = 1;
-
-       return cached_pattern;
-
-free_cached_obj:
-       kfree(cached_pattern);
-       return NULL;
-}
-
-static struct mlx5hws_pattern_cache_item *
-mlx5hws_pat_find_cached_pattern_by_id(struct mlx5hws_pattern_cache *cache,
-                                     u32 ptrn_id)
-{
-       struct mlx5hws_pattern_cache_item *cached_pattern = NULL;
-
-       list_for_each_entry(cached_pattern, &cache->ptrn_list, ptrn_list_node) {
-               if (cached_pattern->mh_data.pattern_id == ptrn_id)
-                       return cached_pattern;
-       }
-
-       return NULL;
-}
-
-static void
-mlx5hws_pat_remove_pattern(struct mlx5hws_pattern_cache_item *cached_pattern)
-{
-       list_del_init(&cached_pattern->ptrn_list_node);
-
-       kfree(cached_pattern->mh_data.data);
-       kfree(cached_pattern);
-}
-
-void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx, u32 ptrn_id)
-{
-       struct mlx5hws_pattern_cache *cache = ctx->pattern_cache;
-       struct mlx5hws_pattern_cache_item *cached_pattern;
-
-       mutex_lock(&cache->lock);
-       cached_pattern = mlx5hws_pat_find_cached_pattern_by_id(cache, ptrn_id);
-       if (!cached_pattern) {
-               mlx5hws_err(ctx, "Failed to find cached pattern with provided ID\n");
-               pr_warn("HWS: pattern ID %d is not found\n", ptrn_id);
-               goto out;
-       }
-
-       if (--cached_pattern->refcount)
-               goto out;
-
-       mlx5hws_pat_remove_pattern(cached_pattern);
-       mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);
-
-out:
-       mutex_unlock(&cache->lock);
-}
-
-int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
-                           __be64 *pattern, size_t pattern_sz,
-                           u32 *pattern_id)
-{
-       u16 num_of_actions = pattern_sz / MLX5HWS_MODIFY_ACTION_SIZE;
-       struct mlx5hws_pattern_cache_item *cached_pattern;
-       u32 ptrn_id = 0;
-       int ret = 0;
-
-       mutex_lock(&ctx->pattern_cache->lock);
-
-       cached_pattern = mlx5hws_pat_get_existing_cached_pattern(ctx->pattern_cache,
-                                                                num_of_actions,
-                                                                pattern);
-       if (cached_pattern) {
-               *pattern_id = cached_pattern->mh_data.pattern_id;
-               goto out_unlock;
-       }
-
-       ret = mlx5hws_cmd_header_modify_pattern_create(ctx->mdev,
-                                                      pattern_sz,
-                                                      (u8 *)pattern,
-                                                      &ptrn_id);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to create pattern FW object\n");
-               goto out_unlock;
-       }
-
-       cached_pattern = mlx5hws_pat_add_pattern_to_cache(ctx->pattern_cache,
-                                                         ptrn_id,
-                                                         num_of_actions,
-                                                         pattern);
-       if (!cached_pattern) {
-               mlx5hws_err(ctx, "Failed to add pattern to cache\n");
-               ret = -EINVAL;
-               goto clean_pattern;
-       }
-
-       mutex_unlock(&ctx->pattern_cache->lock);
-       *pattern_id = ptrn_id;
-
-       return ret;
-
-clean_pattern:
-       mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, *pattern_id);
-out_unlock:
-       mutex_unlock(&ctx->pattern_cache->lock);
-       return ret;
-}
-
-static void
-mlx5d_arg_init_send_attr(struct mlx5hws_send_engine_post_attr *send_attr,
-                        void *comp_data,
-                        u32 arg_idx)
-{
-       send_attr->opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
-       send_attr->opmod = MLX5HWS_WQE_GTA_OPMOD_MOD_ARG;
-       send_attr->len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
-       send_attr->id = arg_idx;
-       send_attr->user_data = comp_data;
-}
-
-void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue,
-                              u32 arg_idx,
-                              u8 *arg_data,
-                              u16 num_of_actions)
-{
-       struct mlx5hws_send_engine_post_attr send_attr = {0};
-       struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg = NULL;
-       struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl = NULL;
-       struct mlx5hws_send_engine_post_ctrl ctrl;
-       size_t wqe_len;
-
-       mlx5d_arg_init_send_attr(&send_attr, NULL, arg_idx);
-
-       ctrl = mlx5hws_send_engine_post_start(queue);
-       mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
-       memset(wqe_ctrl, 0, wqe_len);
-       mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
-       mlx5hws_action_prepare_decap_l3_data(arg_data, (u8 *)wqe_arg,
-                                            num_of_actions);
-       mlx5hws_send_engine_post_end(&ctrl, &send_attr);
-}
-
-void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
-                      void *comp_data,
-                      u32 arg_idx,
-                      u8 *arg_data,
-                      size_t data_size)
-{
-       struct mlx5hws_send_engine_post_attr send_attr = {0};
-       struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg;
-       struct mlx5hws_send_engine_post_ctrl ctrl;
-       struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
-       int i, full_iter, leftover;
-       size_t wqe_len;
-
-       mlx5d_arg_init_send_attr(&send_attr, comp_data, arg_idx);
-
-       /* Each WQE can hold 64B of data, it might require multiple iteration */
-       full_iter = data_size / MLX5HWS_ARG_DATA_SIZE;
-       leftover = data_size & (MLX5HWS_ARG_DATA_SIZE - 1);
-
-       for (i = 0; i < full_iter; i++) {
-               ctrl = mlx5hws_send_engine_post_start(queue);
-               mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
-               memset(wqe_ctrl, 0, wqe_len);
-               mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
-               memcpy(wqe_arg, arg_data, wqe_len);
-               send_attr.id = arg_idx++;
-               mlx5hws_send_engine_post_end(&ctrl, &send_attr);
-
-               /* Move to next argument data */
-               arg_data += MLX5HWS_ARG_DATA_SIZE;
-       }
-
-       if (leftover) {
-               ctrl = mlx5hws_send_engine_post_start(queue);
-               mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
-               memset(wqe_ctrl, 0, wqe_len);
-               mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
-               memcpy(wqe_arg, arg_data, leftover);
-               send_attr.id = arg_idx;
-               mlx5hws_send_engine_post_end(&ctrl, &send_attr);
-       }
-}
-
-int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
-                                     u32 arg_idx,
-                                     u8 *arg_data,
-                                     size_t data_size)
-{
-       struct mlx5hws_send_engine *queue;
-       int ret;
-
-       mutex_lock(&ctx->ctrl_lock);
-
-       /* Get the control queue */
-       queue = &ctx->send_queue[ctx->queues - 1];
-
-       mlx5hws_arg_write(queue, arg_data, arg_idx, arg_data, data_size);
-
-       mlx5hws_send_engine_flush_queue(queue);
-
-       /* Poll for completion */
-       ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1,
-                                       MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
-
-       if (ret)
-               mlx5hws_err(ctx, "Failed to drain arg queue\n");
-
-       mutex_unlock(&ctx->ctrl_lock);
-
-       return ret;
-}
-
-bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx,
-                                          u32 arg_size)
-{
-       if (arg_size < ctx->caps->log_header_modify_argument_granularity ||
-           arg_size > ctx->caps->log_header_modify_argument_max_alloc) {
-               return false;
-       }
-       return true;
-}
-
-int mlx5hws_arg_create(struct mlx5hws_context *ctx,
-                      u8 *data,
-                      size_t data_sz,
-                      u32 log_bulk_sz,
-                      bool write_data,
-                      u32 *arg_id)
-{
-       u16 single_arg_log_sz;
-       u16 multi_arg_log_sz;
-       int ret;
-       u32 id;
-
-       single_arg_log_sz = mlx5hws_arg_data_size_to_arg_log_size(data_sz);
-       multi_arg_log_sz = single_arg_log_sz + log_bulk_sz;
-
-       if (single_arg_log_sz >= MLX5HWS_ARG_CHUNK_SIZE_MAX) {
-               mlx5hws_err(ctx, "Requested single arg %u not supported\n", single_arg_log_sz);
-               return -EOPNOTSUPP;
-       }
-
-       if (!mlx5hws_arg_is_valid_arg_request_size(ctx, multi_arg_log_sz)) {
-               mlx5hws_err(ctx, "Argument log size %d not supported by FW\n", multi_arg_log_sz);
-               return -EOPNOTSUPP;
-       }
-
-       /* Alloc bulk of args */
-       ret = mlx5hws_cmd_arg_create(ctx->mdev, multi_arg_log_sz, ctx->pd_num, &id);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed allocating arg in order: %d\n", multi_arg_log_sz);
-               return ret;
-       }
-
-       if (write_data) {
-               ret = mlx5hws_arg_write_inline_arg_data(ctx, id,
-                                                       data, data_sz);
-               if (ret) {
-                       mlx5hws_err(ctx, "Failed writing arg data\n");
-                       mlx5hws_cmd_arg_destroy(ctx->mdev, id);
-                       return ret;
-               }
-       }
-
-       *arg_id = id;
-       return ret;
-}
-
-void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id)
-{
-       mlx5hws_cmd_arg_destroy(ctx->mdev, arg_id);
-}
-
-int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx,
-                                        __be64 *data,
-                                        u8 num_of_actions,
-                                        u32 log_bulk_sz,
-                                        bool write_data,
-                                        u32 *arg_id)
-{
-       size_t data_sz = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE;
-       int ret;
-
-       ret = mlx5hws_arg_create(ctx,
-                                (u8 *)data,
-                                data_sz,
-                                log_bulk_sz,
-                                write_data,
-                                arg_id);
-       if (ret)
-               mlx5hws_err(ctx, "Failed creating modify header arg\n");
-
-       return ret;
-}
-
-static int
-hws_action_modify_check_field_limitation(u8 action_type, __be64 *pattern)
-{
-       /* Need to check field limitation here, but for now - return OK */
-       return 0;
-}
-
-#define INVALID_FIELD 0xffff
-
-static void
-hws_action_modify_get_target_fields(u8 action_type, __be64 *pattern,
-                                   u16 *src_field, u16 *dst_field)
-{
-       switch (action_type) {
-       case MLX5_ACTION_TYPE_SET:
-       case MLX5_ACTION_TYPE_ADD:
-               *src_field = MLX5_GET(set_action_in, pattern, field);
-               *dst_field = INVALID_FIELD;
-               break;
-       case MLX5_ACTION_TYPE_COPY:
-               *src_field = MLX5_GET(copy_action_in, pattern, src_field);
-               *dst_field = MLX5_GET(copy_action_in, pattern, dst_field);
-               break;
-       default:
-               pr_warn("HWS: invalid modify header action type %d\n", action_type);
-       }
-}
-
-bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz)
-{
-       size_t i;
-
-       for (i = 0; i < sz / MLX5HWS_MODIFY_ACTION_SIZE; i++) {
-               u8 action_type =
-                       MLX5_GET(set_action_in, &pattern[i], action_type);
-               if (action_type >= MLX5_MODIFICATION_TYPE_MAX) {
-                       mlx5hws_err(ctx, "Unsupported action id %d\n", action_type);
-                       return false;
-               }
-               if (hws_action_modify_check_field_limitation(action_type, &pattern[i])) {
-                       mlx5hws_err(ctx, "Unsupported action number %zu\n", i);
-                       return false;
-               }
-       }
-
-       return true;
-}
-
-void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions,
-                          size_t max_actions, size_t *new_size,
-                          u32 *nope_location, __be64 *new_pat)
-{
-       u16 prev_src_field = 0, prev_dst_field = 0;
-       u16 src_field, dst_field;
-       u8 action_type;
-       size_t i, j;
-
-       *new_size = num_actions;
-       *nope_location = 0;
-
-       if (num_actions == 1)
-               return;
-
-       for (i = 0, j = 0; i < num_actions; i++, j++) {
-               action_type = MLX5_GET(set_action_in, &pattern[i], action_type);
-
-               hws_action_modify_get_target_fields(action_type, &pattern[i],
-                                                   &src_field, &dst_field);
-               if (i % 2) {
-                       if (action_type == MLX5_ACTION_TYPE_COPY &&
-                           (prev_src_field == src_field ||
-                            prev_dst_field == dst_field)) {
-                               /* need Nope */
-                               *new_size += 1;
-                               *nope_location |= BIT(i);
-                               memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
-                               MLX5_SET(set_action_in, &new_pat[j],
-                                        action_type,
-                                        MLX5_MODIFICATION_TYPE_NOP);
-                               j++;
-                       } else if (prev_src_field == src_field) {
-                               /* need Nope*/
-                               *new_size += 1;
-                               *nope_location |= BIT(i);
-                               MLX5_SET(set_action_in, &new_pat[j],
-                                        action_type,
-                                        MLX5_MODIFICATION_TYPE_NOP);
-                               j++;
-                       }
-               }
-               memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);
-               /* check if no more space */
-               if (j > max_actions) {
-                       *new_size = num_actions;
-                       *nope_location = 0;
-                       return;
-               }
-
-               prev_src_field = src_field;
-               prev_dst_field = dst_field;
-       }
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h
deleted file mode 100644 (file)
index 27ca933..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#ifndef MLX5HWS_PAT_ARG_H_
-#define MLX5HWS_PAT_ARG_H_
-
-/* Modify-header arg pool */
-enum mlx5hws_arg_chunk_size {
-       MLX5HWS_ARG_CHUNK_SIZE_1,
-       /* Keep MIN updated when changing */
-       MLX5HWS_ARG_CHUNK_SIZE_MIN = MLX5HWS_ARG_CHUNK_SIZE_1,
-       MLX5HWS_ARG_CHUNK_SIZE_2,
-       MLX5HWS_ARG_CHUNK_SIZE_3,
-       MLX5HWS_ARG_CHUNK_SIZE_4,
-       MLX5HWS_ARG_CHUNK_SIZE_MAX,
-};
-
-enum {
-       MLX5HWS_MODIFY_ACTION_SIZE = 8,
-       MLX5HWS_ARG_DATA_SIZE = 64,
-};
-
-struct mlx5hws_pattern_cache {
-       struct mutex lock; /* Protect pattern list */
-       struct list_head ptrn_list;
-};
-
-struct mlx5hws_pattern_cache_item {
-       struct {
-               u32 pattern_id;
-               u8 *data;
-               u16 num_of_actions;
-       } mh_data;
-       u32 refcount;
-       struct list_head ptrn_list_node;
-};
-
-enum mlx5hws_arg_chunk_size
-mlx5hws_arg_get_arg_log_size(u16 num_of_actions);
-
-u32 mlx5hws_arg_get_arg_size(u16 num_of_actions);
-
-enum mlx5hws_arg_chunk_size
-mlx5hws_arg_data_size_to_arg_log_size(u16 data_size);
-
-u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size);
-
-int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache);
-
-void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache);
-
-bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz);
-
-int mlx5hws_arg_create(struct mlx5hws_context *ctx,
-                      u8 *data,
-                      size_t data_sz,
-                      u32 log_bulk_sz,
-                      bool write_data,
-                      u32 *arg_id);
-
-void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id);
-
-int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx,
-                                        __be64 *data,
-                                        u8 num_of_actions,
-                                        u32 log_bulk_sz,
-                                        bool write_data,
-                                        u32 *modify_hdr_arg_id);
-
-int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
-                           __be64 *pattern,
-                           size_t pattern_sz,
-                           u32 *ptrn_id);
-
-void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx,
-                            u32 ptrn_id);
-
-bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx,
-                                          u32 arg_size);
-
-bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions);
-
-void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
-                      void *comp_data,
-                      u32 arg_idx,
-                      u8 *arg_data,
-                      size_t data_size);
-
-void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue,
-                              u32 arg_idx,
-                              u8 *arg_data,
-                              u16 num_of_actions);
-
-int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
-                                     u32 arg_idx,
-                                     u8 *arg_data,
-                                     size_t data_size);
-
-void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions, size_t max_actions,
-                          size_t *new_size, u32 *nope_location, __be64 *new_pat);
-#endif /* MLX5HWS_PAT_ARG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c
deleted file mode 100644 (file)
index a8a63e3..0000000
+++ /dev/null
@@ -1,640 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#include "mlx5hws_internal.h"
-#include "mlx5hws_buddy.h"
-
-static void hws_pool_free_one_resource(struct mlx5hws_pool_resource *resource)
-{
-       switch (resource->pool->type) {
-       case MLX5HWS_POOL_TYPE_STE:
-               mlx5hws_cmd_ste_destroy(resource->pool->ctx->mdev, resource->base_id);
-               break;
-       case MLX5HWS_POOL_TYPE_STC:
-               mlx5hws_cmd_stc_destroy(resource->pool->ctx->mdev, resource->base_id);
-               break;
-       default:
-               break;
-       }
-
-       kfree(resource);
-}
-
-static void hws_pool_resource_free(struct mlx5hws_pool *pool,
-                                  int resource_idx)
-{
-       hws_pool_free_one_resource(pool->resource[resource_idx]);
-       pool->resource[resource_idx] = NULL;
-
-       if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
-               hws_pool_free_one_resource(pool->mirror_resource[resource_idx]);
-               pool->mirror_resource[resource_idx] = NULL;
-       }
-}
-
-static struct mlx5hws_pool_resource *
-hws_pool_create_one_resource(struct mlx5hws_pool *pool, u32 log_range,
-                            u32 fw_ft_type)
-{
-       struct mlx5hws_cmd_ste_create_attr ste_attr;
-       struct mlx5hws_cmd_stc_create_attr stc_attr;
-       struct mlx5hws_pool_resource *resource;
-       u32 obj_id = 0;
-       int ret;
-
-       resource = kzalloc(sizeof(*resource), GFP_KERNEL);
-       if (!resource)
-               return NULL;
-
-       switch (pool->type) {
-       case MLX5HWS_POOL_TYPE_STE:
-               ste_attr.log_obj_range = log_range;
-               ste_attr.table_type = fw_ft_type;
-               ret = mlx5hws_cmd_ste_create(pool->ctx->mdev, &ste_attr, &obj_id);
-               break;
-       case MLX5HWS_POOL_TYPE_STC:
-               stc_attr.log_obj_range = log_range;
-               stc_attr.table_type = fw_ft_type;
-               ret = mlx5hws_cmd_stc_create(pool->ctx->mdev, &stc_attr, &obj_id);
-               break;
-       default:
-               ret = -EINVAL;
-       }
-
-       if (ret) {
-               mlx5hws_err(pool->ctx, "Failed to allocate resource objects\n");
-               goto free_resource;
-       }
-
-       resource->pool = pool;
-       resource->range = 1 << log_range;
-       resource->base_id = obj_id;
-
-       return resource;
-
-free_resource:
-       kfree(resource);
-       return NULL;
-}
-
-static int
-hws_pool_resource_alloc(struct mlx5hws_pool *pool, u32 log_range, int idx)
-{
-       struct mlx5hws_pool_resource *resource;
-       u32 fw_ft_type, opt_log_range;
-
-       fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, false);
-       opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_range;
-       resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
-       if (!resource) {
-               mlx5hws_err(pool->ctx, "Failed allocating resource\n");
-               return -EINVAL;
-       }
-
-       pool->resource[idx] = resource;
-
-       if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
-               struct mlx5hws_pool_resource *mirror_resource;
-
-               fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, true);
-               opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_range;
-               mirror_resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
-               if (!mirror_resource) {
-                       mlx5hws_err(pool->ctx, "Failed allocating mirrored resource\n");
-                       hws_pool_free_one_resource(resource);
-                       pool->resource[idx] = NULL;
-                       return -EINVAL;
-               }
-               pool->mirror_resource[idx] = mirror_resource;
-       }
-
-       return 0;
-}
-
-static unsigned long *hws_pool_create_and_init_bitmap(u32 log_range)
-{
-       unsigned long *cur_bmp;
-
-       cur_bmp = bitmap_zalloc(1 << log_range, GFP_KERNEL);
-       if (!cur_bmp)
-               return NULL;
-
-       bitmap_fill(cur_bmp, 1 << log_range);
-
-       return cur_bmp;
-}
-
-static void hws_pool_buddy_db_put_chunk(struct mlx5hws_pool *pool,
-                                       struct mlx5hws_pool_chunk *chunk)
-{
-       struct mlx5hws_buddy_mem *buddy;
-
-       buddy = pool->db.buddy_manager->buddies[chunk->resource_idx];
-       if (!buddy) {
-               mlx5hws_err(pool->ctx, "No such buddy (%d)\n", chunk->resource_idx);
-               return;
-       }
-
-       mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order);
-}
-
-static struct mlx5hws_buddy_mem *
-hws_pool_buddy_get_next_buddy(struct mlx5hws_pool *pool, int idx,
-                             u32 order, bool *is_new_buddy)
-{
-       static struct mlx5hws_buddy_mem *buddy;
-       u32 new_buddy_size;
-
-       buddy = pool->db.buddy_manager->buddies[idx];
-       if (buddy)
-               return buddy;
-
-       new_buddy_size = max(pool->alloc_log_sz, order);
-       *is_new_buddy = true;
-       buddy = mlx5hws_buddy_create(new_buddy_size);
-       if (!buddy) {
-               mlx5hws_err(pool->ctx, "Failed to create buddy order: %d index: %d\n",
-                           new_buddy_size, idx);
-               return NULL;
-       }
-
-       if (hws_pool_resource_alloc(pool, new_buddy_size, idx) != 0) {
-               mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
-                           pool->type, new_buddy_size, idx);
-               mlx5hws_buddy_cleanup(buddy);
-               return NULL;
-       }
-
-       pool->db.buddy_manager->buddies[idx] = buddy;
-
-       return buddy;
-}
-
-static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool,
-                                       int order,
-                                       u32 *buddy_idx,
-                                       int *seg)
-{
-       struct mlx5hws_buddy_mem *buddy;
-       bool new_mem = false;
-       int ret = 0;
-       int i;
-
-       *seg = -1;
-
-       /* Find the next free place from the buddy array */
-       while (*seg == -1) {
-               for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
-                       buddy = hws_pool_buddy_get_next_buddy(pool, i,
-                                                             order,
-                                                             &new_mem);
-                       if (!buddy) {
-                               ret = -ENOMEM;
-                               goto out;
-                       }
-
-                       *seg = mlx5hws_buddy_alloc_mem(buddy, order);
-                       if (*seg != -1)
-                               goto found;
-
-                       if (pool->flags & MLX5HWS_POOL_FLAGS_ONE_RESOURCE) {
-                               mlx5hws_err(pool->ctx,
-                                           "Fail to allocate seg for one resource pool\n");
-                               ret = -ENOMEM;
-                               goto out;
-                       }
-
-                       if (new_mem) {
-                               /* We have new memory pool, should be place for us */
-                               mlx5hws_err(pool->ctx,
-                                           "No memory for order: %d with buddy no: %d\n",
-                                           order, i);
-                               ret = -ENOMEM;
-                               goto out;
-                       }
-               }
-       }
-
-found:
-       *buddy_idx = i;
-out:
-       return ret;
-}
-
-static int hws_pool_buddy_db_get_chunk(struct mlx5hws_pool *pool,
-                                      struct mlx5hws_pool_chunk *chunk)
-{
-       int ret = 0;
-
-       /* Go over the buddies and find next free slot */
-       ret = hws_pool_buddy_get_mem_chunk(pool, chunk->order,
-                                          &chunk->resource_idx,
-                                          &chunk->offset);
-       if (ret)
-               mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
-                           chunk->order);
-
-       return ret;
-}
-
-static void hws_pool_buddy_db_uninit(struct mlx5hws_pool *pool)
-{
-       struct mlx5hws_buddy_mem *buddy;
-       int i;
-
-       for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
-               buddy = pool->db.buddy_manager->buddies[i];
-               if (buddy) {
-                       mlx5hws_buddy_cleanup(buddy);
-                       kfree(buddy);
-                       pool->db.buddy_manager->buddies[i] = NULL;
-               }
-       }
-
-       kfree(pool->db.buddy_manager);
-}
-
-static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool, u32 log_range)
-{
-       pool->db.buddy_manager = kzalloc(sizeof(*pool->db.buddy_manager), GFP_KERNEL);
-       if (!pool->db.buddy_manager)
-               return -ENOMEM;
-
-       if (pool->flags & MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE) {
-               bool new_buddy;
-
-               if (!hws_pool_buddy_get_next_buddy(pool, 0, log_range, &new_buddy)) {
-                       mlx5hws_err(pool->ctx,
-                                   "Failed allocating memory on create log_sz: %d\n", log_range);
-                       kfree(pool->db.buddy_manager);
-                       return -ENOMEM;
-               }
-       }
-
-       pool->p_db_uninit = &hws_pool_buddy_db_uninit;
-       pool->p_get_chunk = &hws_pool_buddy_db_get_chunk;
-       pool->p_put_chunk = &hws_pool_buddy_db_put_chunk;
-
-       return 0;
-}
-
-static int hws_pool_create_resource_on_index(struct mlx5hws_pool *pool,
-                                            u32 alloc_size, int idx)
-{
-       int ret = hws_pool_resource_alloc(pool, alloc_size, idx);
-
-       if (ret) {
-               mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
-                           pool->type, alloc_size, idx);
-               return ret;
-       }
-
-       return 0;
-}
-
-static struct mlx5hws_pool_elements *
-hws_pool_element_create_new_elem(struct mlx5hws_pool *pool, u32 order, int idx)
-{
-       struct mlx5hws_pool_elements *elem;
-       u32 alloc_size;
-
-       alloc_size = pool->alloc_log_sz;
-
-       elem = kzalloc(sizeof(*elem), GFP_KERNEL);
-       if (!elem)
-               return NULL;
-
-       /* Sharing the same resource, also means that all the elements are with size 1 */
-       if ((pool->flags & MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS) &&
-           !(pool->flags & MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK)) {
-                /* Currently all chunks in size 1 */
-               elem->bitmap = hws_pool_create_and_init_bitmap(alloc_size - order);
-               if (!elem->bitmap) {
-                       mlx5hws_err(pool->ctx,
-                                   "Failed to create bitmap type: %d: size %d index: %d\n",
-                                   pool->type, alloc_size, idx);
-                       goto free_elem;
-               }
-
-               elem->log_size = alloc_size - order;
-       }
-
-       if (hws_pool_create_resource_on_index(pool, alloc_size, idx)) {
-               mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
-                           pool->type, alloc_size, idx);
-               goto free_db;
-       }
-
-       pool->db.element_manager->elements[idx] = elem;
-
-       return elem;
-
-free_db:
-       bitmap_free(elem->bitmap);
-free_elem:
-       kfree(elem);
-       return NULL;
-}
-
-static int hws_pool_element_find_seg(struct mlx5hws_pool_elements *elem, int *seg)
-{
-       unsigned int segment, size;
-
-       size = 1 << elem->log_size;
-
-       segment = find_first_bit(elem->bitmap, size);
-       if (segment >= size) {
-               elem->is_full = true;
-               return -ENOMEM;
-       }
-
-       bitmap_clear(elem->bitmap, segment, 1);
-       *seg = segment;
-       return 0;
-}
-
-static int
-hws_pool_onesize_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
-                                      u32 *idx, int *seg)
-{
-       struct mlx5hws_pool_elements *elem;
-
-       elem = pool->db.element_manager->elements[0];
-       if (!elem)
-               elem = hws_pool_element_create_new_elem(pool, order, 0);
-       if (!elem)
-               goto err_no_elem;
-
-       if (hws_pool_element_find_seg(elem, seg) != 0) {
-               mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
-               return -ENOMEM;
-       }
-
-       *idx = 0;
-       elem->num_of_elements++;
-       return 0;
-
-err_no_elem:
-       mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
-       return -ENOMEM;
-}
-
-static int
-hws_pool_general_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
-                                      u32 *idx, int *seg)
-{
-       int ret, i;
-
-       for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
-               if (!pool->resource[i]) {
-                       ret = hws_pool_create_resource_on_index(pool, order, i);
-                       if (ret)
-                               goto err_no_res;
-                       *idx = i;
-                       *seg = 0; /* One memory slot in that element */
-                       return 0;
-               }
-       }
-
-       mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
-       return -ENOMEM;
-
-err_no_res:
-       mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
-       return -ENOMEM;
-}
-
-static int hws_pool_general_element_db_get_chunk(struct mlx5hws_pool *pool,
-                                                struct mlx5hws_pool_chunk *chunk)
-{
-       int ret;
-
-       /* Go over all memory elements and find/allocate free slot */
-       ret = hws_pool_general_element_get_mem_chunk(pool, chunk->order,
-                                                    &chunk->resource_idx,
-                                                    &chunk->offset);
-       if (ret)
-               mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
-                           chunk->order);
-
-       return ret;
-}
-
-static void hws_pool_general_element_db_put_chunk(struct mlx5hws_pool *pool,
-                                                 struct mlx5hws_pool_chunk *chunk)
-{
-       if (unlikely(!pool->resource[chunk->resource_idx]))
-               pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
-
-       if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE)
-               hws_pool_resource_free(pool, chunk->resource_idx);
-}
-
-static void hws_pool_general_element_db_uninit(struct mlx5hws_pool *pool)
-{
-       (void)pool;
-}
-
-/* This memory management works as the following:
- * - At start doesn't allocate no mem at all.
- * - When new request for chunk arrived:
- *     allocate resource and give it.
- * - When free that chunk:
- *     the resource is freed.
- */
-static int hws_pool_general_element_db_init(struct mlx5hws_pool *pool)
-{
-       pool->p_db_uninit = &hws_pool_general_element_db_uninit;
-       pool->p_get_chunk = &hws_pool_general_element_db_get_chunk;
-       pool->p_put_chunk = &hws_pool_general_element_db_put_chunk;
-
-       return 0;
-}
-
-static void hws_onesize_element_db_destroy_element(struct mlx5hws_pool *pool,
-                                                  struct mlx5hws_pool_elements *elem,
-                                                  struct mlx5hws_pool_chunk *chunk)
-{
-       if (unlikely(!pool->resource[chunk->resource_idx]))
-               pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
-
-       hws_pool_resource_free(pool, chunk->resource_idx);
-       kfree(elem);
-       pool->db.element_manager->elements[chunk->resource_idx] = NULL;
-}
-
-static void hws_onesize_element_db_put_chunk(struct mlx5hws_pool *pool,
-                                            struct mlx5hws_pool_chunk *chunk)
-{
-       struct mlx5hws_pool_elements *elem;
-
-       if (unlikely(chunk->resource_idx))
-               pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
-
-       elem = pool->db.element_manager->elements[chunk->resource_idx];
-       if (!elem) {
-               mlx5hws_err(pool->ctx, "No such element (%d)\n", chunk->resource_idx);
-               return;
-       }
-
-       bitmap_set(elem->bitmap, chunk->offset, 1);
-       elem->is_full = false;
-       elem->num_of_elements--;
-
-       if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE &&
-           !elem->num_of_elements)
-               hws_onesize_element_db_destroy_element(pool, elem, chunk);
-}
-
-static int hws_onesize_element_db_get_chunk(struct mlx5hws_pool *pool,
-                                           struct mlx5hws_pool_chunk *chunk)
-{
-       int ret = 0;
-
-       /* Go over all memory elements and find/allocate free slot */
-       ret = hws_pool_onesize_element_get_mem_chunk(pool, chunk->order,
-                                                    &chunk->resource_idx,
-                                                    &chunk->offset);
-       if (ret)
-               mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
-                           chunk->order);
-
-       return ret;
-}
-
-static void hws_onesize_element_db_uninit(struct mlx5hws_pool *pool)
-{
-       struct mlx5hws_pool_elements *elem;
-       int i;
-
-       for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
-               elem = pool->db.element_manager->elements[i];
-               if (elem) {
-                       bitmap_free(elem->bitmap);
-                       kfree(elem);
-                       pool->db.element_manager->elements[i] = NULL;
-               }
-       }
-       kfree(pool->db.element_manager);
-}
-
-/* This memory management works as the following:
- * - At start doesn't allocate no mem at all.
- * - When new request for chunk arrived:
- *  aloocate the first and only slot of memory/resource
- *  when it ended return error.
- */
-static int hws_pool_onesize_element_db_init(struct mlx5hws_pool *pool)
-{
-       pool->db.element_manager = kzalloc(sizeof(*pool->db.element_manager), GFP_KERNEL);
-       if (!pool->db.element_manager)
-               return -ENOMEM;
-
-       pool->p_db_uninit = &hws_onesize_element_db_uninit;
-       pool->p_get_chunk = &hws_onesize_element_db_get_chunk;
-       pool->p_put_chunk = &hws_onesize_element_db_put_chunk;
-
-       return 0;
-}
-
-static int hws_pool_db_init(struct mlx5hws_pool *pool,
-                           enum mlx5hws_db_type db_type)
-{
-       int ret;
-
-       if (db_type == MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE)
-               ret = hws_pool_general_element_db_init(pool);
-       else if (db_type == MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE)
-               ret = hws_pool_onesize_element_db_init(pool);
-       else
-               ret = hws_pool_buddy_db_init(pool, pool->alloc_log_sz);
-
-       if (ret) {
-               mlx5hws_err(pool->ctx, "Failed to init general db : %d (ret: %d)\n", db_type, ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-static void hws_pool_db_unint(struct mlx5hws_pool *pool)
-{
-       pool->p_db_uninit(pool);
-}
-
-int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
-                            struct mlx5hws_pool_chunk *chunk)
-{
-       int ret;
-
-       mutex_lock(&pool->lock);
-       ret = pool->p_get_chunk(pool, chunk);
-       mutex_unlock(&pool->lock);
-
-       return ret;
-}
-
-void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
-                            struct mlx5hws_pool_chunk *chunk)
-{
-       mutex_lock(&pool->lock);
-       pool->p_put_chunk(pool, chunk);
-       mutex_unlock(&pool->lock);
-}
-
-struct mlx5hws_pool *
-mlx5hws_pool_create(struct mlx5hws_context *ctx, struct mlx5hws_pool_attr *pool_attr)
-{
-       enum mlx5hws_db_type res_db_type;
-       struct mlx5hws_pool *pool;
-
-       pool = kzalloc(sizeof(*pool), GFP_KERNEL);
-       if (!pool)
-               return NULL;
-
-       pool->ctx = ctx;
-       pool->type = pool_attr->pool_type;
-       pool->alloc_log_sz = pool_attr->alloc_log_sz;
-       pool->flags = pool_attr->flags;
-       pool->tbl_type = pool_attr->table_type;
-       pool->opt_type = pool_attr->opt_type;
-
-       /* Support general db */
-       if (pool->flags == (MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
-                           MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK))
-               res_db_type = MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE;
-       else if (pool->flags == (MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
-                                MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS))
-               res_db_type = MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE;
-       else
-               res_db_type = MLX5HWS_POOL_DB_TYPE_BUDDY;
-
-       pool->alloc_log_sz = pool_attr->alloc_log_sz;
-
-       if (hws_pool_db_init(pool, res_db_type))
-               goto free_pool;
-
-       mutex_init(&pool->lock);
-
-       return pool;
-
-free_pool:
-       kfree(pool);
-       return NULL;
-}
-
-int mlx5hws_pool_destroy(struct mlx5hws_pool *pool)
-{
-       int i;
-
-       mutex_destroy(&pool->lock);
-
-       for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++)
-               if (pool->resource[i])
-                       hws_pool_resource_free(pool, i);
-
-       hws_pool_db_unint(pool);
-
-       kfree(pool);
-       return 0;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h
deleted file mode 100644 (file)
index 621298b..0000000
+++ /dev/null
@@ -1,151 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#ifndef MLX5HWS_POOL_H_
-#define MLX5HWS_POOL_H_
-
-#define MLX5HWS_POOL_STC_LOG_SZ 15
-
-#define MLX5HWS_POOL_RESOURCE_ARR_SZ 100
-
-enum mlx5hws_pool_type {
-       MLX5HWS_POOL_TYPE_STE,
-       MLX5HWS_POOL_TYPE_STC,
-};
-
-struct mlx5hws_pool_chunk {
-       u32 resource_idx;
-       /* Internal offset, relative to base index */
-       int offset;
-       int order;
-};
-
-struct mlx5hws_pool_resource {
-       struct mlx5hws_pool *pool;
-       u32 base_id;
-       u32 range;
-};
-
-enum mlx5hws_pool_flags {
-       /* Only a one resource in that pool */
-       MLX5HWS_POOL_FLAGS_ONE_RESOURCE = 1 << 0,
-       MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE = 1 << 1,
-       /* No sharing resources between chunks */
-       MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK = 1 << 2,
-       /* All objects are in the same size */
-       MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS = 1 << 3,
-       /* Managed by buddy allocator */
-       MLX5HWS_POOL_FLAGS_BUDDY_MANAGED = 1 << 4,
-       /* Allocate pool_type memory on pool creation */
-       MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE = 1 << 5,
-
-       /* These values should be used by the caller */
-       MLX5HWS_POOL_FLAGS_FOR_STC_POOL =
-               MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
-               MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS,
-       MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL =
-               MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
-               MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK,
-       MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL =
-               MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
-               MLX5HWS_POOL_FLAGS_BUDDY_MANAGED |
-               MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE,
-};
-
-enum mlx5hws_pool_optimize {
-       MLX5HWS_POOL_OPTIMIZE_NONE = 0x0,
-       MLX5HWS_POOL_OPTIMIZE_ORIG = 0x1,
-       MLX5HWS_POOL_OPTIMIZE_MIRROR = 0x2,
-};
-
-struct mlx5hws_pool_attr {
-       enum mlx5hws_pool_type pool_type;
-       enum mlx5hws_table_type table_type;
-       enum mlx5hws_pool_flags flags;
-       enum mlx5hws_pool_optimize opt_type;
-       /* Allocation size once memory is depleted */
-       size_t alloc_log_sz;
-};
-
-enum mlx5hws_db_type {
-       /* Uses for allocating chunk of big memory, each element has its own resource in the FW*/
-       MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE,
-       /* One resource only, all the elements are with same one size */
-       MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE,
-       /* Many resources, the memory allocated with buddy mechanism */
-       MLX5HWS_POOL_DB_TYPE_BUDDY,
-};
-
-struct mlx5hws_buddy_manager {
-       struct mlx5hws_buddy_mem *buddies[MLX5HWS_POOL_RESOURCE_ARR_SZ];
-};
-
-struct mlx5hws_pool_elements {
-       u32 num_of_elements;
-       unsigned long *bitmap;
-       u32 log_size;
-       bool is_full;
-};
-
-struct mlx5hws_element_manager {
-       struct mlx5hws_pool_elements *elements[MLX5HWS_POOL_RESOURCE_ARR_SZ];
-};
-
-struct mlx5hws_pool_db {
-       enum mlx5hws_db_type type;
-       union {
-               struct mlx5hws_element_manager *element_manager;
-               struct mlx5hws_buddy_manager *buddy_manager;
-       };
-};
-
-typedef int (*mlx5hws_pool_db_get_chunk)(struct mlx5hws_pool *pool,
-                                       struct mlx5hws_pool_chunk *chunk);
-typedef void (*mlx5hws_pool_db_put_chunk)(struct mlx5hws_pool *pool,
-                                        struct mlx5hws_pool_chunk *chunk);
-typedef void (*mlx5hws_pool_unint_db)(struct mlx5hws_pool *pool);
-
-struct mlx5hws_pool {
-       struct mlx5hws_context *ctx;
-       enum mlx5hws_pool_type type;
-       enum mlx5hws_pool_flags flags;
-       struct mutex lock; /* protect the pool */
-       size_t alloc_log_sz;
-       enum mlx5hws_table_type tbl_type;
-       enum mlx5hws_pool_optimize opt_type;
-       struct mlx5hws_pool_resource *resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
-       struct mlx5hws_pool_resource *mirror_resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
-       /* DB */
-       struct mlx5hws_pool_db db;
-       /* Functions */
-       mlx5hws_pool_unint_db p_db_uninit;
-       mlx5hws_pool_db_get_chunk p_get_chunk;
-       mlx5hws_pool_db_put_chunk p_put_chunk;
-};
-
-struct mlx5hws_pool *
-mlx5hws_pool_create(struct mlx5hws_context *ctx,
-                   struct mlx5hws_pool_attr *pool_attr);
-
-int mlx5hws_pool_destroy(struct mlx5hws_pool *pool);
-
-int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
-                            struct mlx5hws_pool_chunk *chunk);
-
-void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
-                            struct mlx5hws_pool_chunk *chunk);
-
-static inline u32
-mlx5hws_pool_chunk_get_base_id(struct mlx5hws_pool *pool,
-                              struct mlx5hws_pool_chunk *chunk)
-{
-       return pool->resource[chunk->resource_idx]->base_id;
-}
-
-static inline u32
-mlx5hws_pool_chunk_get_base_mirror_id(struct mlx5hws_pool *pool,
-                                     struct mlx5hws_pool_chunk *chunk)
-{
-       return pool->mirror_resource[chunk->resource_idx]->base_id;
-}
-#endif /* MLX5HWS_POOL_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h
deleted file mode 100644 (file)
index de92cec..0000000
+++ /dev/null
@@ -1,514 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#ifndef MLX5_PRM_H_
-#define MLX5_PRM_H_
-
-#define MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY 512
-
-/* Action type of header modification. */
-enum {
-       MLX5_MODIFICATION_TYPE_SET = 0x1,
-       MLX5_MODIFICATION_TYPE_ADD = 0x2,
-       MLX5_MODIFICATION_TYPE_COPY = 0x3,
-       MLX5_MODIFICATION_TYPE_INSERT = 0x4,
-       MLX5_MODIFICATION_TYPE_REMOVE = 0x5,
-       MLX5_MODIFICATION_TYPE_NOP = 0x6,
-       MLX5_MODIFICATION_TYPE_REMOVE_WORDS = 0x7,
-       MLX5_MODIFICATION_TYPE_ADD_FIELD = 0x8,
-       MLX5_MODIFICATION_TYPE_MAX,
-};
-
-/* The field of packet to be modified. */
-enum mlx5_modification_field {
-       MLX5_MODI_OUT_NONE = -1,
-       MLX5_MODI_OUT_SMAC_47_16 = 1,
-       MLX5_MODI_OUT_SMAC_15_0,
-       MLX5_MODI_OUT_ETHERTYPE,
-       MLX5_MODI_OUT_DMAC_47_16,
-       MLX5_MODI_OUT_DMAC_15_0,
-       MLX5_MODI_OUT_IP_DSCP,
-       MLX5_MODI_OUT_TCP_FLAGS,
-       MLX5_MODI_OUT_TCP_SPORT,
-       MLX5_MODI_OUT_TCP_DPORT,
-       MLX5_MODI_OUT_IPV4_TTL,
-       MLX5_MODI_OUT_UDP_SPORT,
-       MLX5_MODI_OUT_UDP_DPORT,
-       MLX5_MODI_OUT_SIPV6_127_96,
-       MLX5_MODI_OUT_SIPV6_95_64,
-       MLX5_MODI_OUT_SIPV6_63_32,
-       MLX5_MODI_OUT_SIPV6_31_0,
-       MLX5_MODI_OUT_DIPV6_127_96,
-       MLX5_MODI_OUT_DIPV6_95_64,
-       MLX5_MODI_OUT_DIPV6_63_32,
-       MLX5_MODI_OUT_DIPV6_31_0,
-       MLX5_MODI_OUT_SIPV4,
-       MLX5_MODI_OUT_DIPV4,
-       MLX5_MODI_OUT_FIRST_VID,
-       MLX5_MODI_IN_SMAC_47_16 = 0x31,
-       MLX5_MODI_IN_SMAC_15_0,
-       MLX5_MODI_IN_ETHERTYPE,
-       MLX5_MODI_IN_DMAC_47_16,
-       MLX5_MODI_IN_DMAC_15_0,
-       MLX5_MODI_IN_IP_DSCP,
-       MLX5_MODI_IN_TCP_FLAGS,
-       MLX5_MODI_IN_TCP_SPORT,
-       MLX5_MODI_IN_TCP_DPORT,
-       MLX5_MODI_IN_IPV4_TTL,
-       MLX5_MODI_IN_UDP_SPORT,
-       MLX5_MODI_IN_UDP_DPORT,
-       MLX5_MODI_IN_SIPV6_127_96,
-       MLX5_MODI_IN_SIPV6_95_64,
-       MLX5_MODI_IN_SIPV6_63_32,
-       MLX5_MODI_IN_SIPV6_31_0,
-       MLX5_MODI_IN_DIPV6_127_96,
-       MLX5_MODI_IN_DIPV6_95_64,
-       MLX5_MODI_IN_DIPV6_63_32,
-       MLX5_MODI_IN_DIPV6_31_0,
-       MLX5_MODI_IN_SIPV4,
-       MLX5_MODI_IN_DIPV4,
-       MLX5_MODI_OUT_IPV6_HOPLIMIT,
-       MLX5_MODI_IN_IPV6_HOPLIMIT,
-       MLX5_MODI_META_DATA_REG_A,
-       MLX5_MODI_META_DATA_REG_B = 0x50,
-       MLX5_MODI_META_REG_C_0,
-       MLX5_MODI_META_REG_C_1,
-       MLX5_MODI_META_REG_C_2,
-       MLX5_MODI_META_REG_C_3,
-       MLX5_MODI_META_REG_C_4,
-       MLX5_MODI_META_REG_C_5,
-       MLX5_MODI_META_REG_C_6,
-       MLX5_MODI_META_REG_C_7,
-       MLX5_MODI_OUT_TCP_SEQ_NUM,
-       MLX5_MODI_IN_TCP_SEQ_NUM,
-       MLX5_MODI_OUT_TCP_ACK_NUM,
-       MLX5_MODI_IN_TCP_ACK_NUM = 0x5C,
-       MLX5_MODI_GTP_TEID = 0x6E,
-       MLX5_MODI_OUT_IP_ECN = 0x73,
-       MLX5_MODI_TUNNEL_HDR_DW_1 = 0x75,
-       MLX5_MODI_GTPU_FIRST_EXT_DW_0 = 0x76,
-       MLX5_MODI_HASH_RESULT = 0x81,
-       MLX5_MODI_IN_MPLS_LABEL_0 = 0x8a,
-       MLX5_MODI_IN_MPLS_LABEL_1,
-       MLX5_MODI_IN_MPLS_LABEL_2,
-       MLX5_MODI_IN_MPLS_LABEL_3,
-       MLX5_MODI_IN_MPLS_LABEL_4,
-       MLX5_MODI_OUT_IP_PROTOCOL = 0x4A,
-       MLX5_MODI_OUT_IPV6_NEXT_HDR = 0x4A,
-       MLX5_MODI_META_REG_C_8 = 0x8F,
-       MLX5_MODI_META_REG_C_9 = 0x90,
-       MLX5_MODI_META_REG_C_10 = 0x91,
-       MLX5_MODI_META_REG_C_11 = 0x92,
-       MLX5_MODI_META_REG_C_12 = 0x93,
-       MLX5_MODI_META_REG_C_13 = 0x94,
-       MLX5_MODI_META_REG_C_14 = 0x95,
-       MLX5_MODI_META_REG_C_15 = 0x96,
-       MLX5_MODI_OUT_IPV4_TOTAL_LEN = 0x11D,
-       MLX5_MODI_OUT_IPV6_PAYLOAD_LEN = 0x11E,
-       MLX5_MODI_OUT_IPV4_IHL = 0x11F,
-       MLX5_MODI_OUT_TCP_DATA_OFFSET = 0x120,
-       MLX5_MODI_OUT_ESP_SPI = 0x5E,
-       MLX5_MODI_OUT_ESP_SEQ_NUM = 0x82,
-       MLX5_MODI_OUT_IPSEC_NEXT_HDR = 0x126,
-       MLX5_MODI_INVALID = INT_MAX,
-};
-
-enum {
-       MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE = 0x7 << 1,
-       MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE = 0x8 << 1,
-       MLX5_SET_HCA_CAP_OP_MOD_ESW = 0x9 << 1,
-       MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE = 0x1B << 1,
-       MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 = 0x20 << 1,
-};
-
-enum mlx5_ifc_rtc_update_mode {
-       MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH = 0x0,
-       MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET = 0x1,
-};
-
-enum mlx5_ifc_rtc_access_mode {
-       MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH = 0x0,
-       MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR = 0x1,
-};
-
-enum mlx5_ifc_rtc_ste_format {
-       MLX5_IFC_RTC_STE_FORMAT_8DW = 0x4,
-       MLX5_IFC_RTC_STE_FORMAT_11DW = 0x5,
-       MLX5_IFC_RTC_STE_FORMAT_RANGE = 0x7,
-};
-
-enum mlx5_ifc_rtc_reparse_mode {
-       MLX5_IFC_RTC_REPARSE_NEVER = 0x0,
-       MLX5_IFC_RTC_REPARSE_ALWAYS = 0x1,
-       MLX5_IFC_RTC_REPARSE_BY_STC = 0x2,
-};
-
-#define MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX 16
-
-struct mlx5_ifc_rtc_bits {
-       u8 modify_field_select[0x40];
-       u8 reserved_at_40[0x40];
-       u8 update_index_mode[0x2];
-       u8 reparse_mode[0x2];
-       u8 num_match_ste[0x4];
-       u8 pd[0x18];
-       u8 reserved_at_a0[0x9];
-       u8 access_index_mode[0x3];
-       u8 num_hash_definer[0x4];
-       u8 update_method[0x1];
-       u8 reserved_at_b1[0x2];
-       u8 log_depth[0x5];
-       u8 log_hash_size[0x8];
-       u8 ste_format_0[0x8];
-       u8 table_type[0x8];
-       u8 ste_format_1[0x8];
-       u8 reserved_at_d8[0x8];
-       u8 match_definer_0[0x20];
-       u8 stc_id[0x20];
-       u8 ste_table_base_id[0x20];
-       u8 ste_table_offset[0x20];
-       u8 reserved_at_160[0x8];
-       u8 miss_flow_table_id[0x18];
-       u8 match_definer_1[0x20];
-       u8 reserved_at_1a0[0x260];
-};
-
-enum mlx5_ifc_stc_action_type {
-       MLX5_IFC_STC_ACTION_TYPE_NOP = 0x00,
-       MLX5_IFC_STC_ACTION_TYPE_COPY = 0x05,
-       MLX5_IFC_STC_ACTION_TYPE_SET = 0x06,
-       MLX5_IFC_STC_ACTION_TYPE_ADD = 0x07,
-       MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS = 0x08,
-       MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE = 0x09,
-       MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT = 0x0b,
-       MLX5_IFC_STC_ACTION_TYPE_TAG = 0x0c,
-       MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST = 0x0e,
-       MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_ENCRYPTION = 0x10,
-       MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_DECRYPTION = 0x11,
-       MLX5_IFC_STC_ACTION_TYPE_ASO = 0x12,
-       MLX5_IFC_STC_ACTION_TYPE_TRAILER = 0x13,
-       MLX5_IFC_STC_ACTION_TYPE_COUNTER = 0x14,
-       MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD = 0x1b,
-       MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE = 0x80,
-       MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR = 0x81,
-       MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT = 0x82,
-       MLX5_IFC_STC_ACTION_TYPE_DROP = 0x83,
-       MLX5_IFC_STC_ACTION_TYPE_ALLOW = 0x84,
-       MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT = 0x85,
-       MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK = 0x86,
-};
-
-enum mlx5_ifc_stc_reparse_mode {
-       MLX5_IFC_STC_REPARSE_IGNORE = 0x0,
-       MLX5_IFC_STC_REPARSE_NEVER = 0x1,
-       MLX5_IFC_STC_REPARSE_ALWAYS = 0x2,
-};
-
-struct mlx5_ifc_stc_ste_param_ste_table_bits {
-       u8 ste_obj_id[0x20];
-       u8 match_definer_id[0x20];
-       u8 reserved_at_40[0x3];
-       u8 log_hash_size[0x5];
-       u8 reserved_at_48[0x38];
-};
-
-struct mlx5_ifc_stc_ste_param_tir_bits {
-       u8 reserved_at_0[0x8];
-       u8 tirn[0x18];
-       u8 reserved_at_20[0x60];
-};
-
-struct mlx5_ifc_stc_ste_param_table_bits {
-       u8 reserved_at_0[0x8];
-       u8 table_id[0x18];
-       u8 reserved_at_20[0x60];
-};
-
-struct mlx5_ifc_stc_ste_param_flow_counter_bits {
-       u8 flow_counter_id[0x20];
-};
-
-enum {
-       MLX5_ASO_CT_NUM_PER_OBJ = 1,
-       MLX5_ASO_METER_NUM_PER_OBJ = 2,
-       MLX5_ASO_IPSEC_NUM_PER_OBJ = 1,
-       MLX5_ASO_FIRST_HIT_NUM_PER_OBJ = 512,
-};
-
-struct mlx5_ifc_stc_ste_param_execute_aso_bits {
-       u8 aso_object_id[0x20];
-       u8 return_reg_id[0x4];
-       u8 aso_type[0x4];
-       u8 reserved_at_28[0x18];
-};
-
-struct mlx5_ifc_stc_ste_param_ipsec_encrypt_bits {
-       u8 ipsec_object_id[0x20];
-};
-
-struct mlx5_ifc_stc_ste_param_ipsec_decrypt_bits {
-       u8 ipsec_object_id[0x20];
-};
-
-struct mlx5_ifc_stc_ste_param_trailer_bits {
-       u8 reserved_at_0[0x8];
-       u8 command[0x4];
-       u8 reserved_at_c[0x2];
-       u8 type[0x2];
-       u8 reserved_at_10[0xa];
-       u8 length[0x6];
-};
-
-struct mlx5_ifc_stc_ste_param_header_modify_list_bits {
-       u8 header_modify_pattern_id[0x20];
-       u8 header_modify_argument_id[0x20];
-};
-
-enum mlx5_ifc_header_anchors {
-       MLX5_HEADER_ANCHOR_PACKET_START = 0x0,
-       MLX5_HEADER_ANCHOR_MAC = 0x1,
-       MLX5_HEADER_ANCHOR_FIRST_VLAN_START = 0x2,
-       MLX5_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
-       MLX5_HEADER_ANCHOR_ESP = 0x08,
-       MLX5_HEADER_ANCHOR_TCP_UDP = 0x09,
-       MLX5_HEADER_ANCHOR_TUNNEL_HEADER = 0x0a,
-       MLX5_HEADER_ANCHOR_INNER_MAC = 0x13,
-       MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
-       MLX5_HEADER_ANCHOR_INNER_TCP_UDP = 0x1a,
-       MLX5_HEADER_ANCHOR_L4_PAYLOAD = 0x1b,
-       MLX5_HEADER_ANCHOR_INNER_L4_PAYLOAD = 0x1c
-};
-
-struct mlx5_ifc_stc_ste_param_remove_bits {
-       u8 action_type[0x4];
-       u8 decap[0x1];
-       u8 reserved_at_5[0x5];
-       u8 remove_start_anchor[0x6];
-       u8 reserved_at_10[0x2];
-       u8 remove_end_anchor[0x6];
-       u8 reserved_at_18[0x8];
-};
-
-struct mlx5_ifc_stc_ste_param_remove_words_bits {
-       u8 action_type[0x4];
-       u8 reserved_at_4[0x6];
-       u8 remove_start_anchor[0x6];
-       u8 reserved_at_10[0x1];
-       u8 remove_offset[0x7];
-       u8 reserved_at_18[0x2];
-       u8 remove_size[0x6];
-};
-
-struct mlx5_ifc_stc_ste_param_insert_bits {
-       u8 action_type[0x4];
-       u8 encap[0x1];
-       u8 inline_data[0x1];
-       u8 reserved_at_6[0x4];
-       u8 insert_anchor[0x6];
-       u8 reserved_at_10[0x1];
-       u8 insert_offset[0x7];
-       u8 reserved_at_18[0x1];
-       u8 insert_size[0x7];
-       u8 insert_argument[0x20];
-};
-
-struct mlx5_ifc_stc_ste_param_vport_bits {
-       u8 eswitch_owner_vhca_id[0x10];
-       u8 vport_number[0x10];
-       u8 eswitch_owner_vhca_id_valid[0x1];
-       u8 reserved_at_21[0x5f];
-};
-
-union mlx5_ifc_stc_param_bits {
-       struct mlx5_ifc_stc_ste_param_ste_table_bits ste_table;
-       struct mlx5_ifc_stc_ste_param_tir_bits tir;
-       struct mlx5_ifc_stc_ste_param_table_bits table;
-       struct mlx5_ifc_stc_ste_param_flow_counter_bits counter;
-       struct mlx5_ifc_stc_ste_param_header_modify_list_bits modify_header;
-       struct mlx5_ifc_stc_ste_param_execute_aso_bits aso;
-       struct mlx5_ifc_stc_ste_param_remove_bits remove_header;
-       struct mlx5_ifc_stc_ste_param_insert_bits insert_header;
-       struct mlx5_ifc_set_action_in_bits add;
-       struct mlx5_ifc_set_action_in_bits set;
-       struct mlx5_ifc_copy_action_in_bits copy;
-       struct mlx5_ifc_stc_ste_param_vport_bits vport;
-       struct mlx5_ifc_stc_ste_param_ipsec_encrypt_bits ipsec_encrypt;
-       struct mlx5_ifc_stc_ste_param_ipsec_decrypt_bits ipsec_decrypt;
-       struct mlx5_ifc_stc_ste_param_trailer_bits trailer;
-       u8 reserved_at_0[0x80];
-};
-
-enum {
-       MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC = BIT(0),
-};
-
-struct mlx5_ifc_stc_bits {
-       u8 modify_field_select[0x40];
-       u8 reserved_at_40[0x46];
-       u8 reparse_mode[0x2];
-       u8 table_type[0x8];
-       u8 ste_action_offset[0x8];
-       u8 action_type[0x8];
-       u8 reserved_at_a0[0x60];
-       union mlx5_ifc_stc_param_bits stc_param;
-       u8 reserved_at_180[0x280];
-};
-
-struct mlx5_ifc_ste_bits {
-       u8 modify_field_select[0x40];
-       u8 reserved_at_40[0x48];
-       u8 table_type[0x8];
-       u8 reserved_at_90[0x370];
-};
-
-struct mlx5_ifc_definer_bits {
-       u8 modify_field_select[0x40];
-       u8 reserved_at_40[0x50];
-       u8 format_id[0x10];
-       u8 reserved_at_60[0x60];
-       u8 format_select_dw3[0x8];
-       u8 format_select_dw2[0x8];
-       u8 format_select_dw1[0x8];
-       u8 format_select_dw0[0x8];
-       u8 format_select_dw7[0x8];
-       u8 format_select_dw6[0x8];
-       u8 format_select_dw5[0x8];
-       u8 format_select_dw4[0x8];
-       u8 reserved_at_100[0x18];
-       u8 format_select_dw8[0x8];
-       u8 reserved_at_120[0x20];
-       u8 format_select_byte3[0x8];
-       u8 format_select_byte2[0x8];
-       u8 format_select_byte1[0x8];
-       u8 format_select_byte0[0x8];
-       u8 format_select_byte7[0x8];
-       u8 format_select_byte6[0x8];
-       u8 format_select_byte5[0x8];
-       u8 format_select_byte4[0x8];
-       u8 reserved_at_180[0x40];
-       u8 ctrl[0xa0];
-       u8 match_mask[0x160];
-};
-
-struct mlx5_ifc_arg_bits {
-       u8 rsvd0[0x88];
-       u8 access_pd[0x18];
-};
-
-struct mlx5_ifc_header_modify_pattern_in_bits {
-       u8 modify_field_select[0x40];
-
-       u8 reserved_at_40[0x40];
-
-       u8 pattern_length[0x8];
-       u8 reserved_at_88[0x18];
-
-       u8 reserved_at_a0[0x60];
-
-       u8 pattern_data[MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY * 8];
-};
-
-struct mlx5_ifc_create_rtc_in_bits {
-       struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
-       struct mlx5_ifc_rtc_bits rtc;
-};
-
-struct mlx5_ifc_create_stc_in_bits {
-       struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
-       struct mlx5_ifc_stc_bits stc;
-};
-
-struct mlx5_ifc_create_ste_in_bits {
-       struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
-       struct mlx5_ifc_ste_bits ste;
-};
-
-struct mlx5_ifc_create_definer_in_bits {
-       struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
-       struct mlx5_ifc_definer_bits definer;
-};
-
-struct mlx5_ifc_create_arg_in_bits {
-       struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
-       struct mlx5_ifc_arg_bits arg;
-};
-
-struct mlx5_ifc_create_header_modify_pattern_in_bits {
-       struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
-       struct mlx5_ifc_header_modify_pattern_in_bits pattern;
-};
-
-struct mlx5_ifc_generate_wqe_in_bits {
-       u8 opcode[0x10];
-       u8 uid[0x10];
-       u8 reserved_at_20[0x10];
-       u8 op_mode[0x10];
-       u8 reserved_at_40[0x40];
-       u8 reserved_at_80[0x8];
-       u8 pdn[0x18];
-       u8 reserved_at_a0[0x160];
-       u8 wqe_ctrl[0x80];
-       u8 wqe_gta_ctrl[0x180];
-       u8 wqe_gta_data_0[0x200];
-       u8 wqe_gta_data_1[0x200];
-};
-
-struct mlx5_ifc_generate_wqe_out_bits {
-       u8 status[0x8];
-       u8 reserved_at_8[0x18];
-       u8 syndrome[0x20];
-       u8 reserved_at_40[0x1c0];
-       u8 cqe_data[0x200];
-};
-
-enum mlx5_access_aso_opc_mod {
-       ASO_OPC_MOD_IPSEC = 0x0,
-       ASO_OPC_MOD_CONNECTION_TRACKING = 0x1,
-       ASO_OPC_MOD_POLICER = 0x2,
-       ASO_OPC_MOD_RACE_AVOIDANCE = 0x3,
-       ASO_OPC_MOD_FLOW_HIT = 0x4,
-};
-
-enum {
-       MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION = BIT(0),
-       MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID = BIT(1),
-};
-
-enum {
-       MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT = 0,
-       MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL = 1,
-};
-
-struct mlx5_ifc_alloc_packet_reformat_out_bits {
-       u8 status[0x8];
-       u8 reserved_at_8[0x18];
-
-       u8 syndrome[0x20];
-
-       u8 packet_reformat_id[0x20];
-
-       u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_dealloc_packet_reformat_in_bits {
-       u8 opcode[0x10];
-       u8 reserved_at_10[0x10];
-
-       u8 reserved_at_20[0x10];
-       u8 op_mod[0x10];
-
-       u8 packet_reformat_id[0x20];
-
-       u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_dealloc_packet_reformat_out_bits {
-       u8 status[0x8];
-       u8 reserved_at_8[0x18];
-
-       u8 syndrome[0x20];
-
-       u8 reserved_at_40[0x40];
-};
-
-#endif /* MLX5_PRM_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c
deleted file mode 100644 (file)
index 8a011b9..0000000
+++ /dev/null
@@ -1,780 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#include "mlx5hws_internal.h"
-
-static void hws_rule_skip(struct mlx5hws_matcher *matcher,
-                         struct mlx5hws_match_template *mt,
-                         u32 flow_source,
-                         bool *skip_rx, bool *skip_tx)
-{
-       /* By default FDB rules are added to both RX and TX */
-       *skip_rx = false;
-       *skip_tx = false;
-
-       if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT) {
-               *skip_rx = true;
-       } else if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK) {
-               *skip_tx = true;
-       } else {
-               /* If no flow source was set for current rule,
-                * check for flow source in matcher attributes.
-                */
-               if (matcher->attr.optimize_flow_src) {
-                       *skip_tx =
-                               matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE;
-                       *skip_rx =
-                               matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT;
-                       return;
-               }
-       }
-}
-
-static void
-hws_rule_update_copy_tag(struct mlx5hws_rule *rule,
-                        struct mlx5hws_wqe_gta_data_seg_ste *wqe_data,
-                        bool is_jumbo)
-{
-       struct mlx5hws_rule_match_tag *tag;
-
-       if (!mlx5hws_matcher_is_resizable(rule->matcher)) {
-               tag = &rule->tag;
-       } else {
-               struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
-                       (struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
-               tag = (struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
-       }
-
-       if (is_jumbo)
-               memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ);
-       else
-               memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ);
-}
-
-static void hws_rule_init_dep_wqe(struct mlx5hws_send_ring_dep_wqe *dep_wqe,
-                                 struct mlx5hws_rule *rule,
-                                 struct mlx5hws_match_template *mt,
-                                 struct mlx5hws_rule_attr *attr)
-{
-       struct mlx5hws_matcher *matcher = rule->matcher;
-       struct mlx5hws_table *tbl = matcher->tbl;
-       bool skip_rx, skip_tx;
-
-       dep_wqe->rule = rule;
-       dep_wqe->user_data = attr->user_data;
-       dep_wqe->direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
-                               attr->rule_idx : 0;
-
-       if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
-               hws_rule_skip(matcher, mt, attr->flow_source, &skip_rx, &skip_tx);
-
-               if (!skip_rx) {
-                       dep_wqe->rtc_0 = matcher->match_ste.rtc_0_id;
-                       dep_wqe->retry_rtc_0 = matcher->col_matcher ?
-                                              matcher->col_matcher->match_ste.rtc_0_id : 0;
-               } else {
-                       dep_wqe->rtc_0 = 0;
-                       dep_wqe->retry_rtc_0 = 0;
-               }
-
-               if (!skip_tx) {
-                       dep_wqe->rtc_1 = matcher->match_ste.rtc_1_id;
-                       dep_wqe->retry_rtc_1 = matcher->col_matcher ?
-                                              matcher->col_matcher->match_ste.rtc_1_id : 0;
-               } else {
-                       dep_wqe->rtc_1 = 0;
-                       dep_wqe->retry_rtc_1 = 0;
-               }
-       } else {
-               pr_warn("HWS: invalid tbl->type: %d\n", tbl->type);
-       }
-}
-
-static void hws_rule_move_get_rtc(struct mlx5hws_rule *rule,
-                                 struct mlx5hws_send_ste_attr *ste_attr)
-{
-       struct mlx5hws_matcher *dst_matcher = rule->matcher->resize_dst;
-
-       if (rule->resize_info->rtc_0) {
-               ste_attr->rtc_0 = dst_matcher->match_ste.rtc_0_id;
-               ste_attr->retry_rtc_0 = dst_matcher->col_matcher ?
-                                       dst_matcher->col_matcher->match_ste.rtc_0_id : 0;
-       }
-       if (rule->resize_info->rtc_1) {
-               ste_attr->rtc_1 = dst_matcher->match_ste.rtc_1_id;
-               ste_attr->retry_rtc_1 = dst_matcher->col_matcher ?
-                                       dst_matcher->col_matcher->match_ste.rtc_1_id : 0;
-       }
-}
-
-static void hws_rule_gen_comp(struct mlx5hws_send_engine *queue,
-                             struct mlx5hws_rule *rule,
-                             bool err,
-                             void *user_data,
-                             enum mlx5hws_rule_status rule_status_on_succ)
-{
-       enum mlx5hws_flow_op_status comp_status;
-
-       if (!err) {
-               comp_status = MLX5HWS_FLOW_OP_SUCCESS;
-               rule->status = rule_status_on_succ;
-       } else {
-               comp_status = MLX5HWS_FLOW_OP_ERROR;
-               rule->status = MLX5HWS_RULE_STATUS_FAILED;
-       }
-
-       mlx5hws_send_engine_inc_rule(queue);
-       mlx5hws_send_engine_gen_comp(queue, user_data, comp_status);
-}
-
-static void
-hws_rule_save_resize_info(struct mlx5hws_rule *rule,
-                         struct mlx5hws_send_ste_attr *ste_attr,
-                         bool is_update)
-{
-       if (!mlx5hws_matcher_is_resizable(rule->matcher))
-               return;
-
-       if (likely(!is_update)) {
-               rule->resize_info = kzalloc(sizeof(*rule->resize_info), GFP_KERNEL);
-               if (unlikely(!rule->resize_info)) {
-                       pr_warn("HWS: resize info isn't allocated for rule\n");
-                       return;
-               }
-
-               rule->resize_info->max_stes =
-                       rule->matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
-               rule->resize_info->action_ste_pool[0] = rule->matcher->action_ste[0].max_stes ?
-                                                       rule->matcher->action_ste[0].pool :
-                                                       NULL;
-               rule->resize_info->action_ste_pool[1] = rule->matcher->action_ste[1].max_stes ?
-                                                       rule->matcher->action_ste[1].pool :
-                                                       NULL;
-       }
-
-       memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl,
-              sizeof(rule->resize_info->ctrl_seg));
-       memcpy(rule->resize_info->data_seg, ste_attr->wqe_data,
-              sizeof(rule->resize_info->data_seg));
-}
-
-void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule)
-{
-       if (mlx5hws_matcher_is_resizable(rule->matcher) &&
-           rule->resize_info) {
-               kfree(rule->resize_info);
-               rule->resize_info = NULL;
-       }
-}
-
-static void
-hws_rule_save_delete_info(struct mlx5hws_rule *rule,
-                         struct mlx5hws_send_ste_attr *ste_attr)
-{
-       struct mlx5hws_match_template *mt = rule->matcher->mt;
-       bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
-
-       if (mlx5hws_matcher_is_resizable(rule->matcher))
-               return;
-
-       if (is_jumbo)
-               memcpy(&rule->tag.jumbo, ste_attr->wqe_data->jumbo, MLX5HWS_JUMBO_TAG_SZ);
-       else
-               memcpy(&rule->tag.match, ste_attr->wqe_data->tag, MLX5HWS_MATCH_TAG_SZ);
-}
-
-static void
-hws_rule_clear_delete_info(struct mlx5hws_rule *rule)
-{
-       /* nothing to do here */
-}
-
-static void
-hws_rule_load_delete_info(struct mlx5hws_rule *rule,
-                         struct mlx5hws_send_ste_attr *ste_attr)
-{
-       if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher))) {
-               ste_attr->wqe_tag = &rule->tag;
-       } else {
-               struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
-                       (struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
-               struct mlx5hws_rule_match_tag *tag =
-                       (struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
-               ste_attr->wqe_tag = tag;
-       }
-}
-
-static int hws_rule_alloc_action_ste_idx(struct mlx5hws_rule *rule,
-                                        u8 action_ste_selector)
-{
-       struct mlx5hws_matcher *matcher = rule->matcher;
-       struct mlx5hws_matcher_action_ste *action_ste;
-       struct mlx5hws_pool_chunk ste = {0};
-       int ret;
-
-       action_ste = &matcher->action_ste[action_ste_selector];
-       ste.order = ilog2(roundup_pow_of_two(action_ste->max_stes));
-       ret = mlx5hws_pool_chunk_alloc(action_ste->pool, &ste);
-       if (unlikely(ret)) {
-               mlx5hws_err(matcher->tbl->ctx,
-                           "Failed to allocate STE for rule actions");
-               return ret;
-       }
-       rule->action_ste_idx = ste.offset;
-
-       return 0;
-}
-
-static void hws_rule_free_action_ste_idx(struct mlx5hws_rule *rule,
-                                        u8 action_ste_selector)
-{
-       struct mlx5hws_matcher *matcher = rule->matcher;
-       struct mlx5hws_pool_chunk ste = {0};
-       struct mlx5hws_pool *pool;
-       u8 max_stes;
-
-       if (mlx5hws_matcher_is_resizable(matcher)) {
-               /* Free the original action pool if rule was resized */
-               max_stes = rule->resize_info->max_stes;
-               pool = rule->resize_info->action_ste_pool[action_ste_selector];
-       } else {
-               max_stes = matcher->action_ste[action_ste_selector].max_stes;
-               pool = matcher->action_ste[action_ste_selector].pool;
-       }
-
-       /* This release is safe only when the rule match part was deleted */
-       ste.order = ilog2(roundup_pow_of_two(max_stes));
-       ste.offset = rule->action_ste_idx;
-
-       mlx5hws_pool_chunk_free(pool, &ste);
-}
-
-static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule,
-                                    struct mlx5hws_rule_attr *attr)
-{
-       int action_ste_idx;
-       int ret;
-
-       ret = hws_rule_alloc_action_ste_idx(rule, 0);
-       if (unlikely(ret))
-               return ret;
-
-       action_ste_idx = rule->action_ste_idx;
-
-       ret = hws_rule_alloc_action_ste_idx(rule, 1);
-       if (unlikely(ret)) {
-               hws_rule_free_action_ste_idx(rule, 0);
-               return ret;
-       }
-
-       /* Both pools have to return the same index */
-       if (unlikely(rule->action_ste_idx != action_ste_idx)) {
-               pr_warn("HWS: allocation of action STE failed - pool indexes mismatch\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule)
-{
-       if (rule->action_ste_idx > -1) {
-               hws_rule_free_action_ste_idx(rule, 1);
-               hws_rule_free_action_ste_idx(rule, 0);
-       }
-}
-
-static void hws_rule_create_init(struct mlx5hws_rule *rule,
-                                struct mlx5hws_send_ste_attr *ste_attr,
-                                struct mlx5hws_actions_apply_data *apply,
-                                bool is_update)
-{
-       struct mlx5hws_matcher *matcher = rule->matcher;
-       struct mlx5hws_table *tbl = matcher->tbl;
-       struct mlx5hws_context *ctx = tbl->ctx;
-
-       /* Init rule before reuse */
-       if (!is_update) {
-               /* In update we use these rtc's */
-               rule->rtc_0 = 0;
-               rule->rtc_1 = 0;
-               rule->action_ste_selector = 0;
-       } else {
-               rule->action_ste_selector = !rule->action_ste_selector;
-       }
-
-       rule->pending_wqes = 0;
-       rule->action_ste_idx = -1;
-       rule->status = MLX5HWS_RULE_STATUS_CREATING;
-
-       /* Init default send STE attributes */
-       ste_attr->gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
-       ste_attr->send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
-       ste_attr->send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
-       ste_attr->send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
-
-       /* Init default action apply */
-       apply->tbl_type = tbl->type;
-       apply->common_res = &ctx->common_res[tbl->type];
-       apply->jump_to_action_stc = matcher->action_ste[0].stc.offset;
-       apply->require_dep = 0;
-}
-
-static void hws_rule_move_init(struct mlx5hws_rule *rule,
-                              struct mlx5hws_rule_attr *attr)
-{
-       /* Save the old RTC IDs to be later used in match STE delete */
-       rule->resize_info->rtc_0 = rule->rtc_0;
-       rule->resize_info->rtc_1 = rule->rtc_1;
-       rule->resize_info->rule_idx = attr->rule_idx;
-
-       rule->rtc_0 = 0;
-       rule->rtc_1 = 0;
-
-       rule->pending_wqes = 0;
-       rule->action_ste_idx = -1;
-       rule->action_ste_selector = 0;
-       rule->status = MLX5HWS_RULE_STATUS_CREATING;
-       rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_WRITING;
-}
-
-bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule)
-{
-       return mlx5hws_matcher_is_in_resize(rule->matcher) &&
-              rule->resize_info &&
-              rule->resize_info->state != MLX5HWS_RULE_RESIZE_STATE_IDLE;
-}
-
-static int hws_rule_create_hws(struct mlx5hws_rule *rule,
-                              struct mlx5hws_rule_attr *attr,
-                              u8 mt_idx,
-                              u32 *match_param,
-                              u8 at_idx,
-                              struct mlx5hws_rule_action rule_actions[])
-{
-       struct mlx5hws_action_template *at = &rule->matcher->at[at_idx];
-       struct mlx5hws_match_template *mt = &rule->matcher->mt[mt_idx];
-       bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
-       struct mlx5hws_matcher *matcher = rule->matcher;
-       struct mlx5hws_context *ctx = matcher->tbl->ctx;
-       struct mlx5hws_send_ste_attr ste_attr = {0};
-       struct mlx5hws_send_ring_dep_wqe *dep_wqe;
-       struct mlx5hws_actions_wqe_setter *setter;
-       struct mlx5hws_actions_apply_data apply;
-       struct mlx5hws_send_engine *queue;
-       u8 total_stes, action_stes;
-       bool is_update;
-       int i, ret;
-
-       is_update = !match_param;
-
-       setter = &at->setters[at->num_of_action_stes];
-       total_stes = at->num_of_action_stes + (is_jumbo && !at->only_term);
-       action_stes = total_stes - 1;
-
-       queue = &ctx->send_queue[attr->queue_id];
-       if (unlikely(mlx5hws_send_engine_err(queue)))
-               return -EIO;
-
-       hws_rule_create_init(rule, &ste_attr, &apply, is_update);
-
-       /* Allocate dependent match WQE since rule might have dependent writes.
-        * The queued dependent WQE can be later aborted or kept as a dependency.
-        * dep_wqe buffers (ctrl, data) are also reused for all STE writes.
-        */
-       dep_wqe = mlx5hws_send_add_new_dep_wqe(queue);
-       hws_rule_init_dep_wqe(dep_wqe, rule, mt, attr);
-
-       ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
-       ste_attr.wqe_data = &dep_wqe->wqe_data;
-       apply.wqe_ctrl = &dep_wqe->wqe_ctrl;
-       apply.wqe_data = (__force __be32 *)&dep_wqe->wqe_data;
-       apply.rule_action = rule_actions;
-       apply.queue = queue;
-
-       if (action_stes) {
-               /* Allocate action STEs for rules that need more than match STE */
-               if (!is_update) {
-                       ret = hws_rule_alloc_action_ste(rule, attr);
-                       if (ret) {
-                               mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
-                               mlx5hws_send_abort_new_dep_wqe(queue);
-                               return ret;
-                       }
-               }
-               /* Skip RX/TX based on the dep_wqe init */
-               ste_attr.rtc_0 = dep_wqe->rtc_0 ?
-                                matcher->action_ste[rule->action_ste_selector].rtc_0_id : 0;
-               ste_attr.rtc_1 = dep_wqe->rtc_1 ?
-                                matcher->action_ste[rule->action_ste_selector].rtc_1_id : 0;
-               /* Action STEs are written to a specific index last to first */
-               ste_attr.direct_index = rule->action_ste_idx + action_stes;
-               apply.next_direct_idx = ste_attr.direct_index;
-       } else {
-               apply.next_direct_idx = 0;
-       }
-
-       for (i = total_stes; i-- > 0;) {
-               mlx5hws_action_apply_setter(&apply, setter--, !i && is_jumbo);
-
-               if (i == 0) {
-                       /* Handle last match STE.
-                        * For hash split / linear lookup RTCs, packets reaching any STE
-                        * will always match and perform the specified actions, which
-                        * makes the tag irrelevant.
-                        */
-                       if (likely(!mlx5hws_matcher_is_insert_by_idx(matcher) && !is_update))
-                               mlx5hws_definer_create_tag(match_param, mt->fc, mt->fc_sz,
-                                                          (u8 *)dep_wqe->wqe_data.action);
-                       else if (is_update)
-                               hws_rule_update_copy_tag(rule, &dep_wqe->wqe_data, is_jumbo);
-
-                       /* Rule has dependent WQEs, match dep_wqe is queued */
-                       if (action_stes || apply.require_dep)
-                               break;
-
-                       /* Rule has no dependencies, abort dep_wqe and send WQE now */
-                       mlx5hws_send_abort_new_dep_wqe(queue);
-                       ste_attr.wqe_tag_is_jumbo = is_jumbo;
-                       ste_attr.send_attr.notify_hw = !attr->burst;
-                       ste_attr.send_attr.user_data = dep_wqe->user_data;
-                       ste_attr.send_attr.rule = dep_wqe->rule;
-                       ste_attr.rtc_0 = dep_wqe->rtc_0;
-                       ste_attr.rtc_1 = dep_wqe->rtc_1;
-                       ste_attr.used_id_rtc_0 = &rule->rtc_0;
-                       ste_attr.used_id_rtc_1 = &rule->rtc_1;
-                       ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
-                       ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
-                       ste_attr.direct_index = dep_wqe->direct_index;
-               } else {
-                       apply.next_direct_idx = --ste_attr.direct_index;
-               }
-
-               mlx5hws_send_ste(queue, &ste_attr);
-       }
-
-       /* Backup TAG on the rule for deletion and resize info for
-        * moving rules to a new matcher, only after insertion.
-        */
-       if (!is_update)
-               hws_rule_save_delete_info(rule, &ste_attr);
-
-       hws_rule_save_resize_info(rule, &ste_attr, is_update);
-       mlx5hws_send_engine_inc_rule(queue);
-
-       if (!attr->burst)
-               mlx5hws_send_all_dep_wqe(queue);
-
-       return 0;
-}
-
-static void hws_rule_destroy_failed_hws(struct mlx5hws_rule *rule,
-                                       struct mlx5hws_rule_attr *attr)
-{
-       struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
-       struct mlx5hws_send_engine *queue;
-
-       queue = &ctx->send_queue[attr->queue_id];
-
-       hws_rule_gen_comp(queue, rule, false,
-                         attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
-
-       /* Rule failed now we can safely release action STEs */
-       mlx5hws_rule_free_action_ste(rule);
-
-       /* Clear complex tag */
-       hws_rule_clear_delete_info(rule);
-
-       /* Clear info that was saved for resizing */
-       mlx5hws_rule_clear_resize_info(rule);
-
-       /* If a rule that was indicated as burst (need to trigger HW) has failed
-        * insertion we won't ring the HW as nothing is being written to the WQ.
-        * In such case update the last WQE and ring the HW with that work
-        */
-       if (attr->burst)
-               return;
-
-       mlx5hws_send_all_dep_wqe(queue);
-       mlx5hws_send_engine_flush_queue(queue);
-}
-
-static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
-                               struct mlx5hws_rule_attr *attr)
-{
-       bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
-       struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
-       struct mlx5hws_matcher *matcher = rule->matcher;
-       struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0};
-       struct mlx5hws_send_ste_attr ste_attr = {0};
-       struct mlx5hws_send_engine *queue;
-
-       queue = &ctx->send_queue[attr->queue_id];
-
-       if (unlikely(mlx5hws_send_engine_err(queue))) {
-               hws_rule_destroy_failed_hws(rule, attr);
-               return 0;
-       }
-
-       /* Rule is not completed yet */
-       if (rule->status == MLX5HWS_RULE_STATUS_CREATING)
-               return -EBUSY;
-
-       /* Rule failed and doesn't require cleanup */
-       if (rule->status == MLX5HWS_RULE_STATUS_FAILED) {
-               hws_rule_destroy_failed_hws(rule, attr);
-               return 0;
-       }
-
-       if (rule->skip_delete) {
-               /* Rule shouldn't be deleted in HW.
-                * Generate completion as if write succeeded, and we can
-                * safely release action STEs and clear resize info.
-                */
-               hws_rule_gen_comp(queue, rule, false,
-                                 attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
-
-               mlx5hws_rule_free_action_ste(rule);
-               mlx5hws_rule_clear_resize_info(rule);
-               return 0;
-       }
-
-       mlx5hws_send_engine_inc_rule(queue);
-
-       /* Send dependent WQE */
-       if (!attr->burst)
-               mlx5hws_send_all_dep_wqe(queue);
-
-       rule->status = MLX5HWS_RULE_STATUS_DELETING;
-
-       ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
-       ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
-       ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
-
-       ste_attr.send_attr.rule = rule;
-       ste_attr.send_attr.notify_hw = !attr->burst;
-       ste_attr.send_attr.user_data = attr->user_data;
-
-       ste_attr.rtc_0 = rule->rtc_0;
-       ste_attr.rtc_1 = rule->rtc_1;
-       ste_attr.used_id_rtc_0 = &rule->rtc_0;
-       ste_attr.used_id_rtc_1 = &rule->rtc_1;
-       ste_attr.wqe_ctrl = &wqe_ctrl;
-       ste_attr.wqe_tag_is_jumbo = is_jumbo;
-       ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;
-       if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
-               ste_attr.direct_index = attr->rule_idx;
-
-       hws_rule_load_delete_info(rule, &ste_attr);
-       mlx5hws_send_ste(queue, &ste_attr);
-       hws_rule_clear_delete_info(rule);
-
-       return 0;
-}
-
-static int hws_rule_enqueue_precheck(struct mlx5hws_rule *rule,
-                                    struct mlx5hws_rule_attr *attr)
-{
-       struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
-
-       if (unlikely(!attr->user_data))
-               return -EINVAL;
-
-       /* Check if there is room in queue */
-       if (unlikely(mlx5hws_send_engine_full(&ctx->send_queue[attr->queue_id])))
-               return -EBUSY;
-
-       return 0;
-}
-
-static int hws_rule_enqueue_precheck_move(struct mlx5hws_rule *rule,
-                                         struct mlx5hws_rule_attr *attr)
-{
-       if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
-               return -EINVAL;
-
-       return hws_rule_enqueue_precheck(rule, attr);
-}
-
-static int hws_rule_enqueue_precheck_create(struct mlx5hws_rule *rule,
-                                           struct mlx5hws_rule_attr *attr)
-{
-       if (unlikely(mlx5hws_matcher_is_in_resize(rule->matcher)))
-               /* Matcher in resize - new rules are not allowed */
-               return -EAGAIN;
-
-       return hws_rule_enqueue_precheck(rule, attr);
-}
-
-static int hws_rule_enqueue_precheck_update(struct mlx5hws_rule *rule,
-                                           struct mlx5hws_rule_attr *attr)
-{
-       struct mlx5hws_matcher *matcher = rule->matcher;
-
-       if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher) &&
-                    !matcher->attr.optimize_using_rule_idx &&
-                    !mlx5hws_matcher_is_insert_by_idx(matcher))) {
-               return -EOPNOTSUPP;
-       }
-
-       if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
-               return -EBUSY;
-
-       return hws_rule_enqueue_precheck_create(rule, attr);
-}
-
-int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
-                                void *queue_ptr,
-                                void *user_data)
-{
-       bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
-       struct mlx5hws_wqe_gta_ctrl_seg empty_wqe_ctrl = {0};
-       struct mlx5hws_matcher *matcher = rule->matcher;
-       struct mlx5hws_send_engine *queue = queue_ptr;
-       struct mlx5hws_send_ste_attr ste_attr = {0};
-
-       mlx5hws_send_all_dep_wqe(queue);
-
-       rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_DELETING;
-
-       ste_attr.send_attr.fence = 0;
-       ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
-       ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
-       ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
-       ste_attr.send_attr.rule = rule;
-       ste_attr.send_attr.notify_hw = 1;
-       ste_attr.send_attr.user_data = user_data;
-       ste_attr.rtc_0 = rule->resize_info->rtc_0;
-       ste_attr.rtc_1 = rule->resize_info->rtc_1;
-       ste_attr.used_id_rtc_0 = &rule->resize_info->rtc_0;
-       ste_attr.used_id_rtc_1 = &rule->resize_info->rtc_1;
-       ste_attr.wqe_ctrl = &empty_wqe_ctrl;
-       ste_attr.wqe_tag_is_jumbo = is_jumbo;
-       ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;
-
-       if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
-               ste_attr.direct_index = rule->resize_info->rule_idx;
-
-       hws_rule_load_delete_info(rule, &ste_attr);
-       mlx5hws_send_ste(queue, &ste_attr);
-
-       return 0;
-}
-
-int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
-                             struct mlx5hws_rule_attr *attr)
-{
-       bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
-       struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
-       struct mlx5hws_matcher *matcher = rule->matcher;
-       struct mlx5hws_send_ste_attr ste_attr = {0};
-       struct mlx5hws_send_engine *queue;
-       int ret;
-
-       ret = hws_rule_enqueue_precheck_move(rule, attr);
-       if (unlikely(ret))
-               return ret;
-
-       queue = &ctx->send_queue[attr->queue_id];
-
-       ret = mlx5hws_send_engine_err(queue);
-       if (ret)
-               return ret;
-
-       hws_rule_move_init(rule, attr);
-       hws_rule_move_get_rtc(rule, &ste_attr);
-
-       ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
-       ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
-       ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
-       ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
-       ste_attr.wqe_tag_is_jumbo = is_jumbo;
-
-       ste_attr.send_attr.rule = rule;
-       ste_attr.send_attr.fence = 0;
-       ste_attr.send_attr.notify_hw = !attr->burst;
-       ste_attr.send_attr.user_data = attr->user_data;
-
-       ste_attr.used_id_rtc_0 = &rule->rtc_0;
-       ste_attr.used_id_rtc_1 = &rule->rtc_1;
-       ste_attr.wqe_ctrl = (struct mlx5hws_wqe_gta_ctrl_seg *)rule->resize_info->ctrl_seg;
-       ste_attr.wqe_data = (struct mlx5hws_wqe_gta_data_seg_ste *)rule->resize_info->data_seg;
-       ste_attr.direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
-                               attr->rule_idx : 0;
-
-       mlx5hws_send_ste(queue, &ste_attr);
-       mlx5hws_send_engine_inc_rule(queue);
-
-       if (!attr->burst)
-               mlx5hws_send_all_dep_wqe(queue);
-
-       return 0;
-}
-
-int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
-                       u8 mt_idx,
-                       u32 *match_param,
-                       u8 at_idx,
-                       struct mlx5hws_rule_action rule_actions[],
-                       struct mlx5hws_rule_attr *attr,
-                       struct mlx5hws_rule *rule_handle)
-{
-       int ret;
-
-       rule_handle->matcher = matcher;
-
-       ret = hws_rule_enqueue_precheck_create(rule_handle, attr);
-       if (unlikely(ret))
-               return ret;
-
-       if (unlikely(!(matcher->num_of_mt >= mt_idx) ||
-                    !(matcher->num_of_at >= at_idx) ||
-                    !match_param)) {
-               pr_warn("HWS: Invalid rule creation parameters (MTs, ATs or match params)\n");
-               return -EINVAL;
-       }
-
-       ret = hws_rule_create_hws(rule_handle,
-                                 attr,
-                                 mt_idx,
-                                 match_param,
-                                 at_idx,
-                                 rule_actions);
-
-       return ret;
-}
-
-int mlx5hws_rule_destroy(struct mlx5hws_rule *rule,
-                        struct mlx5hws_rule_attr *attr)
-{
-       int ret;
-
-       ret = hws_rule_enqueue_precheck(rule, attr);
-       if (unlikely(ret))
-               return ret;
-
-       ret = hws_rule_destroy_hws(rule, attr);
-
-       return ret;
-}
-
-int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
-                              u8 at_idx,
-                              struct mlx5hws_rule_action rule_actions[],
-                              struct mlx5hws_rule_attr *attr)
-{
-       int ret;
-
-       ret = hws_rule_enqueue_precheck_update(rule, attr);
-       if (unlikely(ret))
-               return ret;
-
-       ret = hws_rule_create_hws(rule,
-                                 attr,
-                                 0,
-                                 NULL,
-                                 at_idx,
-                                 rule_actions);
-
-       return ret;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h
deleted file mode 100644 (file)
index 495cdd1..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#ifndef MLX5HWS_RULE_H_
-#define MLX5HWS_RULE_H_
-
-enum {
-       MLX5HWS_STE_CTRL_SZ = 20,
-       MLX5HWS_ACTIONS_SZ = 12,
-       MLX5HWS_MATCH_TAG_SZ = 32,
-       MLX5HWS_JUMBO_TAG_SZ = 44,
-};
-
-enum mlx5hws_rule_status {
-       MLX5HWS_RULE_STATUS_UNKNOWN,
-       MLX5HWS_RULE_STATUS_CREATING,
-       MLX5HWS_RULE_STATUS_CREATED,
-       MLX5HWS_RULE_STATUS_DELETING,
-       MLX5HWS_RULE_STATUS_DELETED,
-       MLX5HWS_RULE_STATUS_FAILING,
-       MLX5HWS_RULE_STATUS_FAILED,
-};
-
-enum mlx5hws_rule_move_state {
-       MLX5HWS_RULE_RESIZE_STATE_IDLE,
-       MLX5HWS_RULE_RESIZE_STATE_WRITING,
-       MLX5HWS_RULE_RESIZE_STATE_DELETING,
-};
-
-enum mlx5hws_rule_jumbo_match_tag_offset {
-       MLX5HWS_RULE_JUMBO_MATCH_TAG_OFFSET_DW0 = 8,
-};
-
-struct mlx5hws_rule_match_tag {
-       union {
-               u8 jumbo[MLX5HWS_JUMBO_TAG_SZ];
-               struct {
-                       u8 reserved[MLX5HWS_ACTIONS_SZ];
-                       u8 match[MLX5HWS_MATCH_TAG_SZ];
-               };
-       };
-};
-
-struct mlx5hws_rule_resize_info {
-       struct mlx5hws_pool *action_ste_pool[2];
-       u32 rtc_0;
-       u32 rtc_1;
-       u32 rule_idx;
-       u8 state;
-       u8 max_stes;
-       u8 ctrl_seg[MLX5HWS_WQE_SZ_GTA_CTRL]; /* Ctrl segment of STE: 48 bytes */
-       u8 data_seg[MLX5HWS_WQE_SZ_GTA_DATA]; /* Data segment of STE: 64 bytes */
-};
-
-struct mlx5hws_rule {
-       struct mlx5hws_matcher *matcher;
-       union {
-               struct mlx5hws_rule_match_tag tag;
-               struct mlx5hws_rule_resize_info *resize_info;
-       };
-       u32 rtc_0; /* The RTC into which the STE was inserted */
-       u32 rtc_1; /* The RTC into which the STE was inserted */
-       int action_ste_idx; /* STE array index */
-       u8 status; /* enum mlx5hws_rule_status */
-       u8 action_ste_selector; /* For rule update - which action STE is in use */
-       u8 pending_wqes;
-       bool skip_delete; /* For complex rules - another rule with same tag
-                          * still exists, so don't actually delete this rule.
-                          */
-};
-
-void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule);
-
-int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
-                                void *queue, void *user_data);
-
-int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
-                             struct mlx5hws_rule_attr *attr);
-
-bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule);
-
-void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule);
-
-#endif /* MLX5HWS_RULE_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
deleted file mode 100644 (file)
index 6d443e6..0000000
+++ /dev/null
@@ -1,1231 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#include "mlx5hws_internal.h"
-#include "lib/clock.h"
-
-enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };
-
-struct mlx5hws_send_ring_dep_wqe *
-mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue)
-{
-       struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
-       unsigned int idx = send_sq->head_dep_idx++ & (queue->num_entries - 1);
-
-       memset(&send_sq->dep_wqe[idx].wqe_data.tag, 0, MLX5HWS_MATCH_TAG_SZ);
-
-       return &send_sq->dep_wqe[idx];
-}
-
-void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue)
-{
-       queue->send_ring.send_sq.head_dep_idx--;
-}
-
-void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue)
-{
-       struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
-       struct mlx5hws_send_ste_attr ste_attr = {0};
-       struct mlx5hws_send_ring_dep_wqe *dep_wqe;
-
-       ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
-       ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
-       ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
-       ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
-
-       /* Fence first from previous depend WQEs  */
-       ste_attr.send_attr.fence = 1;
-
-       while (send_sq->head_dep_idx != send_sq->tail_dep_idx) {
-               dep_wqe = &send_sq->dep_wqe[send_sq->tail_dep_idx++ & (queue->num_entries - 1)];
-
-               /* Notify HW on the last WQE */
-               ste_attr.send_attr.notify_hw = (send_sq->tail_dep_idx == send_sq->head_dep_idx);
-               ste_attr.send_attr.user_data = dep_wqe->user_data;
-               ste_attr.send_attr.rule = dep_wqe->rule;
-
-               ste_attr.rtc_0 = dep_wqe->rtc_0;
-               ste_attr.rtc_1 = dep_wqe->rtc_1;
-               ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
-               ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
-               ste_attr.used_id_rtc_0 = &dep_wqe->rule->rtc_0;
-               ste_attr.used_id_rtc_1 = &dep_wqe->rule->rtc_1;
-               ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
-               ste_attr.wqe_data = &dep_wqe->wqe_data;
-               ste_attr.direct_index = dep_wqe->direct_index;
-
-               mlx5hws_send_ste(queue, &ste_attr);
-
-               /* Fencing is done only on the first WQE */
-               ste_attr.send_attr.fence = 0;
-       }
-}
-
-struct mlx5hws_send_engine_post_ctrl
-mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue)
-{
-       struct mlx5hws_send_engine_post_ctrl ctrl;
-
-       ctrl.queue = queue;
-       /* Currently only one send ring is supported */
-       ctrl.send_ring = &queue->send_ring;
-       ctrl.num_wqebbs = 0;
-
-       return ctrl;
-}
-
-void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
-                                     char **buf, size_t *len)
-{
-       struct mlx5hws_send_ring_sq *send_sq = &ctrl->send_ring->send_sq;
-       unsigned int idx;
-
-       idx = (send_sq->cur_post + ctrl->num_wqebbs) & send_sq->buf_mask;
-
-       /* Note that *buf is a single MLX5_SEND_WQE_BB. It cannot be used
-        * as buffer of more than one WQE_BB, since the two MLX5_SEND_WQE_BB
-        * can be on 2 different kernel memory pages.
-        */
-       *buf = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
-       *len = MLX5_SEND_WQE_BB;
-
-       if (!ctrl->num_wqebbs) {
-               *buf += sizeof(struct mlx5hws_wqe_ctrl_seg);
-               *len -= sizeof(struct mlx5hws_wqe_ctrl_seg);
-       }
-
-       ctrl->num_wqebbs++;
-}
-
-static void hws_send_engine_post_ring(struct mlx5hws_send_ring_sq *sq,
-                                     struct mlx5hws_wqe_ctrl_seg *doorbell_cseg)
-{
-       /* ensure wqe is visible to device before updating doorbell record */
-       dma_wmb();
-
-       *sq->wq.db = cpu_to_be32(sq->cur_post);
-
-       /* ensure doorbell record is visible to device before ringing the
-        * doorbell
-        */
-       wmb();
-
-       mlx5_write64((__be32 *)doorbell_cseg, sq->uar_map);
-
-       /* Ensure doorbell is written on uar_page before poll_cq */
-       WRITE_ONCE(doorbell_cseg, NULL);
-}
-
-static void
-hws_send_wqe_set_tag(struct mlx5hws_wqe_gta_data_seg_ste *wqe_data,
-                    struct mlx5hws_rule_match_tag *tag,
-                    bool is_jumbo)
-{
-       if (is_jumbo) {
-               /* Clear previous possibly dirty control */
-               memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ);
-               memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ);
-       } else {
-               /* Clear previous possibly dirty control and actions */
-               memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ + MLX5HWS_ACTIONS_SZ);
-               memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ);
-       }
-}
-
-void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
-                                 struct mlx5hws_send_engine_post_attr *attr)
-{
-       struct mlx5hws_wqe_ctrl_seg *wqe_ctrl;
-       struct mlx5hws_send_ring_sq *sq;
-       unsigned int idx;
-       u32 flags = 0;
-
-       sq = &ctrl->send_ring->send_sq;
-       idx = sq->cur_post & sq->buf_mask;
-       sq->last_idx = idx;
-
-       wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, idx);
-
-       wqe_ctrl->opmod_idx_opcode =
-               cpu_to_be32((attr->opmod << 24) |
-                           ((sq->cur_post & 0xffff) << 8) |
-                           attr->opcode);
-       wqe_ctrl->qpn_ds =
-               cpu_to_be32((attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16 |
-                                sq->sqn << 8);
-       wqe_ctrl->imm = cpu_to_be32(attr->id);
-
-       flags |= attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
-       flags |= attr->fence ? MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE : 0;
-       wqe_ctrl->flags = cpu_to_be32(flags);
-
-       sq->wr_priv[idx].id = attr->id;
-       sq->wr_priv[idx].retry_id = attr->retry_id;
-
-       sq->wr_priv[idx].rule = attr->rule;
-       sq->wr_priv[idx].user_data = attr->user_data;
-       sq->wr_priv[idx].num_wqebbs = ctrl->num_wqebbs;
-
-       if (attr->rule) {
-               sq->wr_priv[idx].rule->pending_wqes++;
-               sq->wr_priv[idx].used_id = attr->used_id;
-       }
-
-       sq->cur_post += ctrl->num_wqebbs;
-
-       if (attr->notify_hw)
-               hws_send_engine_post_ring(sq, wqe_ctrl);
-}
-
-static void hws_send_wqe(struct mlx5hws_send_engine *queue,
-                        struct mlx5hws_send_engine_post_attr *send_attr,
-                        struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl,
-                        void *send_wqe_data,
-                        void *send_wqe_tag,
-                        bool is_jumbo,
-                        u8 gta_opcode,
-                        u32 direct_index)
-{
-       struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
-       struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
-       struct mlx5hws_send_engine_post_ctrl ctrl;
-       size_t wqe_len;
-
-       ctrl = mlx5hws_send_engine_post_start(queue);
-       mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
-       mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len);
-
-       wqe_ctrl->op_dirix = cpu_to_be32(gta_opcode << 28 | direct_index);
-       memcpy(wqe_ctrl->stc_ix, send_wqe_ctrl->stc_ix,
-              sizeof(send_wqe_ctrl->stc_ix));
-
-       if (send_wqe_data)
-               memcpy(wqe_data, send_wqe_data, sizeof(*wqe_data));
-       else
-               hws_send_wqe_set_tag(wqe_data, send_wqe_tag, is_jumbo);
-
-       mlx5hws_send_engine_post_end(&ctrl, send_attr);
-}
-
-void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
-                     struct mlx5hws_send_ste_attr *ste_attr)
-{
-       struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr;
-       u8 notify_hw = send_attr->notify_hw;
-       u8 fence = send_attr->fence;
-
-       if (ste_attr->rtc_1) {
-               send_attr->id = ste_attr->rtc_1;
-               send_attr->used_id = ste_attr->used_id_rtc_1;
-               send_attr->retry_id = ste_attr->retry_rtc_1;
-               send_attr->fence = fence;
-               send_attr->notify_hw = notify_hw && !ste_attr->rtc_0;
-               hws_send_wqe(queue, send_attr,
-                            ste_attr->wqe_ctrl,
-                            ste_attr->wqe_data,
-                            ste_attr->wqe_tag,
-                            ste_attr->wqe_tag_is_jumbo,
-                            ste_attr->gta_opcode,
-                            ste_attr->direct_index);
-       }
-
-       if (ste_attr->rtc_0) {
-               send_attr->id = ste_attr->rtc_0;
-               send_attr->used_id = ste_attr->used_id_rtc_0;
-               send_attr->retry_id = ste_attr->retry_rtc_0;
-               send_attr->fence = fence && !ste_attr->rtc_1;
-               send_attr->notify_hw = notify_hw;
-               hws_send_wqe(queue, send_attr,
-                            ste_attr->wqe_ctrl,
-                            ste_attr->wqe_data,
-                            ste_attr->wqe_tag,
-                            ste_attr->wqe_tag_is_jumbo,
-                            ste_attr->gta_opcode,
-                            ste_attr->direct_index);
-       }
-
-       /* Restore to original requested values */
-       send_attr->notify_hw = notify_hw;
-       send_attr->fence = fence;
-}
-
-static void hws_send_engine_retry_post_send(struct mlx5hws_send_engine *queue,
-                                           struct mlx5hws_send_ring_priv *priv,
-                                           u16 wqe_cnt)
-{
-       struct mlx5hws_send_engine_post_attr send_attr = {0};
-       struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
-       struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
-       struct mlx5hws_send_engine_post_ctrl ctrl;
-       struct mlx5hws_send_ring_sq *send_sq;
-       unsigned int idx;
-       size_t wqe_len;
-       char *p;
-
-       send_attr.rule = priv->rule;
-       send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
-       send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
-       send_attr.len = MLX5_SEND_WQE_BB * 2 - sizeof(struct mlx5hws_wqe_ctrl_seg);
-       send_attr.notify_hw = 1;
-       send_attr.fence = 0;
-       send_attr.user_data = priv->user_data;
-       send_attr.id = priv->retry_id;
-       send_attr.used_id = priv->used_id;
-
-       ctrl = mlx5hws_send_engine_post_start(queue);
-       mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
-       mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len);
-
-       send_sq = &ctrl.send_ring->send_sq;
-       idx = wqe_cnt & send_sq->buf_mask;
-       p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
-
-       /* Copy old gta ctrl */
-       memcpy(wqe_ctrl, p + sizeof(struct mlx5hws_wqe_ctrl_seg),
-              MLX5_SEND_WQE_BB - sizeof(struct mlx5hws_wqe_ctrl_seg));
-
-       idx = (wqe_cnt + 1) & send_sq->buf_mask;
-       p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
-
-       /* Copy old gta data */
-       memcpy(wqe_data, p, MLX5_SEND_WQE_BB);
-
-       mlx5hws_send_engine_post_end(&ctrl, &send_attr);
-}
-
-void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue)
-{
-       struct mlx5hws_send_ring_sq *sq = &queue->send_ring.send_sq;
-       struct mlx5hws_wqe_ctrl_seg *wqe_ctrl;
-
-       wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, sq->last_idx);
-       wqe_ctrl->flags |= cpu_to_be32(MLX5_WQE_CTRL_CQ_UPDATE);
-
-       hws_send_engine_post_ring(sq, wqe_ctrl);
-}
-
-static void
-hws_send_engine_update_rule_resize(struct mlx5hws_send_engine *queue,
-                                  struct mlx5hws_send_ring_priv *priv,
-                                  enum mlx5hws_flow_op_status *status)
-{
-       switch (priv->rule->resize_info->state) {
-       case MLX5HWS_RULE_RESIZE_STATE_WRITING:
-               if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) {
-                       /* Backup original RTCs */
-                       u32 orig_rtc_0 = priv->rule->resize_info->rtc_0;
-                       u32 orig_rtc_1 = priv->rule->resize_info->rtc_1;
-
-                       /* Delete partially failed move rule using resize_info */
-                       priv->rule->resize_info->rtc_0 = priv->rule->rtc_0;
-                       priv->rule->resize_info->rtc_1 = priv->rule->rtc_1;
-
-                       /* Move rule to original RTC for future delete */
-                       priv->rule->rtc_0 = orig_rtc_0;
-                       priv->rule->rtc_1 = orig_rtc_1;
-               }
-               /* Clean leftovers */
-               mlx5hws_rule_move_hws_remove(priv->rule, queue, priv->user_data);
-               break;
-
-       case MLX5HWS_RULE_RESIZE_STATE_DELETING:
-               if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) {
-                       *status = MLX5HWS_FLOW_OP_ERROR;
-               } else {
-                       *status = MLX5HWS_FLOW_OP_SUCCESS;
-                       priv->rule->matcher = priv->rule->matcher->resize_dst;
-               }
-               priv->rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_IDLE;
-               priv->rule->status = MLX5HWS_RULE_STATUS_CREATED;
-               break;
-
-       default:
-               break;
-       }
-}
-
-static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue,
-                                       struct mlx5hws_send_ring_priv *priv,
-                                       u16 wqe_cnt,
-                                       enum mlx5hws_flow_op_status *status)
-{
-       priv->rule->pending_wqes--;
-
-       if (*status == MLX5HWS_FLOW_OP_ERROR) {
-               if (priv->retry_id) {
-                       hws_send_engine_retry_post_send(queue, priv, wqe_cnt);
-                       return;
-               }
-               /* Some part of the rule failed */
-               priv->rule->status = MLX5HWS_RULE_STATUS_FAILING;
-               *priv->used_id = 0;
-       } else {
-               *priv->used_id = priv->id;
-       }
-
-       /* Update rule status for the last completion */
-       if (!priv->rule->pending_wqes) {
-               if (unlikely(mlx5hws_rule_move_in_progress(priv->rule))) {
-                       hws_send_engine_update_rule_resize(queue, priv, status);
-                       return;
-               }
-
-               if (unlikely(priv->rule->status == MLX5HWS_RULE_STATUS_FAILING)) {
-                       /* Rule completely failed and doesn't require cleanup */
-                       if (!priv->rule->rtc_0 && !priv->rule->rtc_1)
-                               priv->rule->status = MLX5HWS_RULE_STATUS_FAILED;
-
-                       *status = MLX5HWS_FLOW_OP_ERROR;
-               } else {
-                       /* Increase the status, this only works on good flow as the enum
-                        * is arrange it away creating -> created -> deleting -> deleted
-                        */
-                       priv->rule->status++;
-                       *status = MLX5HWS_FLOW_OP_SUCCESS;
-                       /* Rule was deleted now we can safely release action STEs
-                        * and clear resize info
-                        */
-                       if (priv->rule->status == MLX5HWS_RULE_STATUS_DELETED) {
-                               mlx5hws_rule_free_action_ste(priv->rule);
-                               mlx5hws_rule_clear_resize_info(priv->rule);
-                       }
-               }
-       }
-}
-
-static void hws_send_engine_update(struct mlx5hws_send_engine *queue,
-                                  struct mlx5_cqe64 *cqe,
-                                  struct mlx5hws_send_ring_priv *priv,
-                                  struct mlx5hws_flow_op_result res[],
-                                  s64 *i,
-                                  u32 res_nb,
-                                  u16 wqe_cnt)
-{
-       enum mlx5hws_flow_op_status status;
-
-       if (!cqe || (likely(be32_to_cpu(cqe->byte_cnt) >> 31 == 0) &&
-                    likely(get_cqe_opcode(cqe) == MLX5_CQE_REQ))) {
-               status = MLX5HWS_FLOW_OP_SUCCESS;
-       } else {
-               status = MLX5HWS_FLOW_OP_ERROR;
-       }
-
-       if (priv->user_data) {
-               if (priv->rule) {
-                       hws_send_engine_update_rule(queue, priv, wqe_cnt, &status);
-                       /* Completion is provided on the last rule WQE */
-                       if (priv->rule->pending_wqes)
-                               return;
-               }
-
-               if (*i < res_nb) {
-                       res[*i].user_data = priv->user_data;
-                       res[*i].status = status;
-                       (*i)++;
-                       mlx5hws_send_engine_dec_rule(queue);
-               } else {
-                       mlx5hws_send_engine_gen_comp(queue, priv->user_data, status);
-               }
-       }
-}
-
-static int mlx5hws_parse_cqe(struct mlx5hws_send_ring_cq *cq,
-                            struct mlx5_cqe64 *cqe64)
-{
-       if (unlikely(get_cqe_opcode(cqe64) != MLX5_CQE_REQ)) {
-               struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe64;
-
-               mlx5_core_err(cq->mdev, "Bad OP in HWS SQ CQE: 0x%x\n", get_cqe_opcode(cqe64));
-               mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n", err_cqe->vendor_err_synd);
-               mlx5_core_err(cq->mdev, "syndrome=%x\n", err_cqe->syndrome);
-               print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
-                              16, 1, err_cqe,
-                              sizeof(*err_cqe), false);
-               return CQ_POLL_ERR;
-       }
-
-       return CQ_OK;
-}
-
-static int mlx5hws_cq_poll_one(struct mlx5hws_send_ring_cq *cq)
-{
-       struct mlx5_cqe64 *cqe64;
-       int err;
-
-       cqe64 = mlx5_cqwq_get_cqe(&cq->wq);
-       if (!cqe64) {
-               if (unlikely(cq->mdev->state ==
-                            MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
-                       mlx5_core_dbg_once(cq->mdev,
-                                          "Polling CQ while device is shutting down\n");
-                       return CQ_POLL_ERR;
-               }
-               return CQ_EMPTY;
-       }
-
-       mlx5_cqwq_pop(&cq->wq);
-       err = mlx5hws_parse_cqe(cq, cqe64);
-       mlx5_cqwq_update_db_record(&cq->wq);
-
-       return err;
-}
-
-static void hws_send_engine_poll_cq(struct mlx5hws_send_engine *queue,
-                                   struct mlx5hws_flow_op_result res[],
-                                   s64 *polled,
-                                   u32 res_nb)
-{
-       struct mlx5hws_send_ring *send_ring = &queue->send_ring;
-       struct mlx5hws_send_ring_cq *cq = &send_ring->send_cq;
-       struct mlx5hws_send_ring_sq *sq = &send_ring->send_sq;
-       struct mlx5hws_send_ring_priv *priv;
-       struct mlx5_cqe64 *cqe;
-       u8 cqe_opcode;
-       u16 wqe_cnt;
-
-       cqe = mlx5_cqwq_get_cqe(&cq->wq);
-       if (!cqe)
-               return;
-
-       cqe_opcode = get_cqe_opcode(cqe);
-       if (cqe_opcode == MLX5_CQE_INVALID)
-               return;
-
-       if (unlikely(cqe_opcode != MLX5_CQE_REQ))
-               queue->err = true;
-
-       wqe_cnt = be16_to_cpu(cqe->wqe_counter) & sq->buf_mask;
-
-       while (cq->poll_wqe != wqe_cnt) {
-               priv = &sq->wr_priv[cq->poll_wqe];
-               hws_send_engine_update(queue, NULL, priv, res, polled, res_nb, 0);
-               cq->poll_wqe = (cq->poll_wqe + priv->num_wqebbs) & sq->buf_mask;
-       }
-
-       priv = &sq->wr_priv[wqe_cnt];
-       cq->poll_wqe = (wqe_cnt + priv->num_wqebbs) & sq->buf_mask;
-       hws_send_engine_update(queue, cqe, priv, res, polled, res_nb, wqe_cnt);
-       mlx5hws_cq_poll_one(cq);
-}
-
-static void hws_send_engine_poll_list(struct mlx5hws_send_engine *queue,
-                                     struct mlx5hws_flow_op_result res[],
-                                     s64 *polled,
-                                     u32 res_nb)
-{
-       struct mlx5hws_completed_poll *comp = &queue->completed;
-
-       while (comp->ci != comp->pi) {
-               if (*polled < res_nb) {
-                       res[*polled].status =
-                               comp->entries[comp->ci].status;
-                       res[*polled].user_data =
-                               comp->entries[comp->ci].user_data;
-                       (*polled)++;
-                       comp->ci = (comp->ci + 1) & comp->mask;
-                       mlx5hws_send_engine_dec_rule(queue);
-               } else {
-                       return;
-               }
-       }
-}
-
-static int hws_send_engine_poll(struct mlx5hws_send_engine *queue,
-                               struct mlx5hws_flow_op_result res[],
-                               u32 res_nb)
-{
-       s64 polled = 0;
-
-       hws_send_engine_poll_list(queue, res, &polled, res_nb);
-
-       if (polled >= res_nb)
-               return polled;
-
-       hws_send_engine_poll_cq(queue, res, &polled, res_nb);
-
-       return polled;
-}
-
-int mlx5hws_send_queue_poll(struct mlx5hws_context *ctx,
-                           u16 queue_id,
-                           struct mlx5hws_flow_op_result res[],
-                           u32 res_nb)
-{
-       return hws_send_engine_poll(&ctx->send_queue[queue_id], res, res_nb);
-}
-
-static int hws_send_ring_alloc_sq(struct mlx5_core_dev *mdev,
-                                 int numa_node,
-                                 struct mlx5hws_send_engine *queue,
-                                 struct mlx5hws_send_ring_sq *sq,
-                                 void *sqc_data)
-{
-       void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
-       struct mlx5_wq_cyc *wq = &sq->wq;
-       struct mlx5_wq_param param;
-       size_t buf_sz;
-       int err;
-
-       sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
-       sq->mdev = mdev;
-
-       param.db_numa_node = numa_node;
-       param.buf_numa_node = numa_node;
-       err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
-       if (err)
-               return err;
-       wq->db = &wq->db[MLX5_SND_DBR];
-
-       buf_sz = queue->num_entries * MAX_WQES_PER_RULE;
-       sq->dep_wqe = kcalloc(queue->num_entries, sizeof(*sq->dep_wqe), GFP_KERNEL);
-       if (!sq->dep_wqe) {
-               err = -ENOMEM;
-               goto destroy_wq_cyc;
-       }
-
-       sq->wr_priv = kzalloc(sizeof(*sq->wr_priv) * buf_sz, GFP_KERNEL);
-       if (!sq->wr_priv) {
-               err = -ENOMEM;
-               goto free_dep_wqe;
-       }
-
-       sq->buf_mask = (queue->num_entries * MAX_WQES_PER_RULE) - 1;
-
-       return 0;
-
-free_dep_wqe:
-       kfree(sq->dep_wqe);
-destroy_wq_cyc:
-       mlx5_wq_destroy(&sq->wq_ctrl);
-       return err;
-}
-
-static void hws_send_ring_free_sq(struct mlx5hws_send_ring_sq *sq)
-{
-       if (!sq)
-               return;
-       kfree(sq->wr_priv);
-       kfree(sq->dep_wqe);
-       mlx5_wq_destroy(&sq->wq_ctrl);
-}
-
-static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn,
-                                  void *sqc_data,
-                                  struct mlx5hws_send_engine *queue,
-                                  struct mlx5hws_send_ring_sq *sq,
-                                  struct mlx5hws_send_ring_cq *cq)
-{
-       void *in, *sqc, *wq;
-       int inlen, err;
-       u8 ts_format;
-
-       inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
-               sizeof(u64) * sq->wq_ctrl.buf.npages;
-       in = kvzalloc(inlen, GFP_KERNEL);
-       if (!in)
-               return -ENOMEM;
-
-       sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
-       wq = MLX5_ADDR_OF(sqc, sqc, wq);
-
-       memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
-       MLX5_SET(sqc, sqc, cqn, cq->mcq.cqn);
-
-       MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
-       MLX5_SET(sqc, sqc, flush_in_error_en, 1);
-
-       ts_format = mlx5_is_real_time_sq(mdev) ? MLX5_TIMESTAMP_FORMAT_REAL_TIME :
-                                                MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
-       MLX5_SET(sqc, sqc, ts_format, ts_format);
-
-       MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
-       MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
-       MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
-       MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
-
-       mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
-                                 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
-
-       err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
-
-       kvfree(in);
-
-       return err;
-}
-
-static void hws_send_ring_destroy_sq(struct mlx5_core_dev *mdev,
-                                    struct mlx5hws_send_ring_sq *sq)
-{
-       mlx5_core_destroy_sq(mdev, sq->sqn);
-}
-
-static int hws_send_ring_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
-{
-       void *in, *sqc;
-       int inlen, err;
-
-       inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
-       in = kvzalloc(inlen, GFP_KERNEL);
-       if (!in)
-               return -ENOMEM;
-
-       MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
-       sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
-       MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
-
-       err = mlx5_core_modify_sq(mdev, sqn, in);
-
-       kvfree(in);
-
-       return err;
-}
-
-static void hws_send_ring_close_sq(struct mlx5hws_send_ring_sq *sq)
-{
-       mlx5_core_destroy_sq(sq->mdev, sq->sqn);
-       mlx5_wq_destroy(&sq->wq_ctrl);
-       kfree(sq->wr_priv);
-       kfree(sq->dep_wqe);
-}
-
-static int hws_send_ring_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
-                                      void *sqc_data,
-                                      struct mlx5hws_send_engine *queue,
-                                      struct mlx5hws_send_ring_sq *sq,
-                                      struct mlx5hws_send_ring_cq *cq)
-{
-       int err;
-
-       err = hws_send_ring_create_sq(mdev, pdn, sqc_data, queue, sq, cq);
-       if (err)
-               return err;
-
-       err = hws_send_ring_set_sq_rdy(mdev, sq->sqn);
-       if (err)
-               hws_send_ring_destroy_sq(mdev, sq);
-
-       return err;
-}
-
-static int hws_send_ring_open_sq(struct mlx5hws_context *ctx,
-                                int numa_node,
-                                struct mlx5hws_send_engine *queue,
-                                struct mlx5hws_send_ring_sq *sq,
-                                struct mlx5hws_send_ring_cq *cq)
-{
-       size_t buf_sz, sq_log_buf_sz;
-       void *sqc_data, *wq;
-       int err;
-
-       sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
-       if (!sqc_data)
-               return -ENOMEM;
-
-       buf_sz = queue->num_entries * MAX_WQES_PER_RULE;
-       sq_log_buf_sz = ilog2(roundup_pow_of_two(buf_sz));
-
-       wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
-       MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
-       MLX5_SET(wq, wq, pd, ctx->pd_num);
-       MLX5_SET(wq, wq, log_wq_sz, sq_log_buf_sz);
-
-       err = hws_send_ring_alloc_sq(ctx->mdev, numa_node, queue, sq, sqc_data);
-       if (err)
-               goto err_free_sqc;
-
-       err = hws_send_ring_create_sq_rdy(ctx->mdev, ctx->pd_num, sqc_data,
-                                         queue, sq, cq);
-       if (err)
-               goto err_free_sq;
-
-       kvfree(sqc_data);
-
-       return 0;
-err_free_sq:
-       hws_send_ring_free_sq(sq);
-err_free_sqc:
-       kvfree(sqc_data);
-       return err;
-}
-
-static void hws_cq_complete(struct mlx5_core_cq *mcq,
-                           struct mlx5_eqe *eqe)
-{
-       pr_err("CQ completion CQ: #%u\n", mcq->cqn);
-}
-
-static int hws_send_ring_alloc_cq(struct mlx5_core_dev *mdev,
-                                 int numa_node,
-                                 struct mlx5hws_send_engine *queue,
-                                 void *cqc_data,
-                                 struct mlx5hws_send_ring_cq *cq)
-{
-       struct mlx5_core_cq *mcq = &cq->mcq;
-       struct mlx5_wq_param param;
-       struct mlx5_cqe64 *cqe;
-       int err;
-       u32 i;
-
-       param.buf_numa_node = numa_node;
-       param.db_numa_node = numa_node;
-
-       err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
-       if (err)
-               return err;
-
-       mcq->cqe_sz = 64;
-       mcq->set_ci_db = cq->wq_ctrl.db.db;
-       mcq->arm_db = cq->wq_ctrl.db.db + 1;
-       mcq->comp = hws_cq_complete;
-
-       for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
-               cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
-               cqe->op_own = 0xf1;
-       }
-
-       cq->mdev = mdev;
-
-       return 0;
-}
-
-static int hws_send_ring_create_cq(struct mlx5_core_dev *mdev,
-                                  struct mlx5hws_send_engine *queue,
-                                  void *cqc_data,
-                                  struct mlx5hws_send_ring_cq *cq)
-{
-       u32 out[MLX5_ST_SZ_DW(create_cq_out)];
-       struct mlx5_core_cq *mcq = &cq->mcq;
-       void *in, *cqc;
-       int inlen, eqn;
-       int err;
-
-       err = mlx5_comp_eqn_get(mdev, 0, &eqn);
-       if (err)
-               return err;
-
-       inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
-               sizeof(u64) * cq->wq_ctrl.buf.npages;
-       in = kvzalloc(inlen, GFP_KERNEL);
-       if (!in)
-               return -ENOMEM;
-
-       cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
-       memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
-       mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
-                                 (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
-
-       MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
-       MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
-       MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
-       MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
-
-       err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
-
-       kvfree(in);
-
-       return err;
-}
-
-static int hws_send_ring_open_cq(struct mlx5_core_dev *mdev,
-                                struct mlx5hws_send_engine *queue,
-                                int numa_node,
-                                struct mlx5hws_send_ring_cq *cq)
-{
-       void *cqc_data;
-       int err;
-
-       cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
-       if (!cqc_data)
-               return -ENOMEM;
-
-       MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
-       MLX5_SET(cqc, cqc_data, cqe_sz, queue->num_entries);
-       MLX5_SET(cqc, cqc_data, log_cq_size, ilog2(queue->num_entries));
-
-       err = hws_send_ring_alloc_cq(mdev, numa_node, queue, cqc_data, cq);
-       if (err)
-               goto err_out;
-
-       err = hws_send_ring_create_cq(mdev, queue, cqc_data, cq);
-       if (err)
-               goto err_free_cq;
-
-       kvfree(cqc_data);
-
-       return 0;
-
-err_free_cq:
-       mlx5_wq_destroy(&cq->wq_ctrl);
-err_out:
-       kvfree(cqc_data);
-       return err;
-}
-
-static void hws_send_ring_close_cq(struct mlx5hws_send_ring_cq *cq)
-{
-       mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
-       mlx5_wq_destroy(&cq->wq_ctrl);
-}
-
-static void hws_send_ring_close(struct mlx5hws_send_engine *queue)
-{
-       hws_send_ring_close_sq(&queue->send_ring.send_sq);
-       hws_send_ring_close_cq(&queue->send_ring.send_cq);
-}
-
-static int mlx5hws_send_ring_open(struct mlx5hws_context *ctx,
-                                 struct mlx5hws_send_engine *queue)
-{
-       int numa_node = dev_to_node(mlx5_core_dma_dev(ctx->mdev));
-       struct mlx5hws_send_ring *ring = &queue->send_ring;
-       int err;
-
-       err = hws_send_ring_open_cq(ctx->mdev, queue, numa_node, &ring->send_cq);
-       if (err)
-               return err;
-
-       err = hws_send_ring_open_sq(ctx, numa_node, queue, &ring->send_sq,
-                                   &ring->send_cq);
-       if (err)
-               goto close_cq;
-
-       return err;
-
-close_cq:
-       hws_send_ring_close_cq(&ring->send_cq);
-       return err;
-}
-
-void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
-{
-       hws_send_ring_close(queue);
-       kfree(queue->completed.entries);
-}
-
-int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
-                           struct mlx5hws_send_engine *queue,
-                           u16 queue_size)
-{
-       int err;
-
-       mutex_init(&queue->lock);
-
-       queue->num_entries = roundup_pow_of_two(queue_size);
-       queue->used_entries = 0;
-
-       queue->completed.entries = kcalloc(queue->num_entries,
-                                          sizeof(queue->completed.entries[0]),
-                                          GFP_KERNEL);
-       if (!queue->completed.entries)
-               return -ENOMEM;
-
-       queue->completed.pi = 0;
-       queue->completed.ci = 0;
-       queue->completed.mask = queue->num_entries - 1;
-       err = mlx5hws_send_ring_open(ctx, queue);
-       if (err)
-               goto free_completed_entries;
-
-       return 0;
-
-free_completed_entries:
-       kfree(queue->completed.entries);
-       return err;
-}
-
-static void __hws_send_queues_close(struct mlx5hws_context *ctx, u16 queues)
-{
-       while (queues--)
-               mlx5hws_send_queue_close(&ctx->send_queue[queues]);
-}
-
-static void hws_send_queues_bwc_locks_destroy(struct mlx5hws_context *ctx)
-{
-       int bwc_queues = mlx5hws_bwc_queues(ctx);
-       int i;
-
-       if (!mlx5hws_context_bwc_supported(ctx))
-               return;
-
-       for (i = 0; i < bwc_queues; i++) {
-               mutex_destroy(&ctx->bwc_send_queue_locks[i]);
-               lockdep_unregister_key(ctx->bwc_lock_class_keys + i);
-       }
-
-       kfree(ctx->bwc_lock_class_keys);
-       kfree(ctx->bwc_send_queue_locks);
-}
-
-void mlx5hws_send_queues_close(struct mlx5hws_context *ctx)
-{
-       hws_send_queues_bwc_locks_destroy(ctx);
-       __hws_send_queues_close(ctx, ctx->queues);
-       kfree(ctx->send_queue);
-}
-
-static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
-{
-       /* Number of BWC queues is equal to number of the usual HWS queues */
-       int bwc_queues = ctx->queues - 1;
-       int i;
-
-       if (!mlx5hws_context_bwc_supported(ctx))
-               return 0;
-
-       ctx->queues += bwc_queues;
-
-       ctx->bwc_send_queue_locks = kcalloc(bwc_queues,
-                                           sizeof(*ctx->bwc_send_queue_locks),
-                                           GFP_KERNEL);
-
-       if (!ctx->bwc_send_queue_locks)
-               return -ENOMEM;
-
-       ctx->bwc_lock_class_keys = kcalloc(bwc_queues,
-                                          sizeof(*ctx->bwc_lock_class_keys),
-                                          GFP_KERNEL);
-       if (!ctx->bwc_lock_class_keys)
-               goto err_lock_class_keys;
-
-       for (i = 0; i < bwc_queues; i++) {
-               mutex_init(&ctx->bwc_send_queue_locks[i]);
-               lockdep_register_key(ctx->bwc_lock_class_keys + i);
-       }
-
-       return 0;
-
-err_lock_class_keys:
-       kfree(ctx->bwc_send_queue_locks);
-       return -ENOMEM;
-}
-
-int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
-                            u16 queues,
-                            u16 queue_size)
-{
-       int err = 0;
-       u32 i;
-
-       /* Open one extra queue for control path */
-       ctx->queues = queues + 1;
-
-       /* open a separate set of queues and locks for bwc API */
-       err = hws_bwc_send_queues_init(ctx);
-       if (err)
-               return err;
-
-       ctx->send_queue = kcalloc(ctx->queues, sizeof(*ctx->send_queue), GFP_KERNEL);
-       if (!ctx->send_queue) {
-               err = -ENOMEM;
-               goto free_bwc_locks;
-       }
-
-       for (i = 0; i < ctx->queues; i++) {
-               err = mlx5hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size);
-               if (err)
-                       goto close_send_queues;
-       }
-
-       return 0;
-
-close_send_queues:
-        __hws_send_queues_close(ctx, i);
-
-       kfree(ctx->send_queue);
-
-free_bwc_locks:
-       hws_send_queues_bwc_locks_destroy(ctx);
-
-       return err;
-}
-
-int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
-                             u16 queue_id,
-                             u32 actions)
-{
-       struct mlx5hws_send_ring_sq *send_sq;
-       struct mlx5hws_send_engine *queue;
-       bool wait_comp = false;
-       s64 polled = 0;
-
-       queue = &ctx->send_queue[queue_id];
-       send_sq = &queue->send_ring.send_sq;
-
-       switch (actions) {
-       case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC:
-               wait_comp = true;
-               fallthrough;
-       case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_ASYNC:
-               if (send_sq->head_dep_idx != send_sq->tail_dep_idx)
-                       /* Send dependent WQEs to drain the queue */
-                       mlx5hws_send_all_dep_wqe(queue);
-               else
-                       /* Signal on the last posted WQE */
-                       mlx5hws_send_engine_flush_queue(queue);
-
-               /* Poll queue until empty */
-               while (wait_comp && !mlx5hws_send_engine_empty(queue))
-                       hws_send_engine_poll_cq(queue, NULL, &polled, 0);
-
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int
-hws_send_wqe_fw(struct mlx5_core_dev *mdev,
-               u32 pd_num,
-               struct mlx5hws_send_engine_post_attr *send_attr,
-               struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl,
-               void *send_wqe_match_data,
-               void *send_wqe_match_tag,
-               void *send_wqe_range_data,
-               void *send_wqe_range_tag,
-               bool is_jumbo,
-               u8 gta_opcode)
-{
-       bool has_range = send_wqe_range_data || send_wqe_range_tag;
-       bool has_match = send_wqe_match_data || send_wqe_match_tag;
-       struct mlx5hws_wqe_gta_data_seg_ste gta_wqe_data0 = {0};
-       struct mlx5hws_wqe_gta_data_seg_ste gta_wqe_data1 = {0};
-       struct mlx5hws_wqe_gta_ctrl_seg gta_wqe_ctrl = {0};
-       struct mlx5hws_cmd_generate_wqe_attr attr = {0};
-       struct mlx5hws_wqe_ctrl_seg wqe_ctrl = {0};
-       struct mlx5_cqe64 cqe;
-       u32 flags = 0;
-       int ret;
-
-       /* Set WQE control */
-       wqe_ctrl.opmod_idx_opcode = cpu_to_be32((send_attr->opmod << 24) | send_attr->opcode);
-       wqe_ctrl.qpn_ds = cpu_to_be32((send_attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16);
-       flags |= send_attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
-       wqe_ctrl.flags = cpu_to_be32(flags);
-       wqe_ctrl.imm = cpu_to_be32(send_attr->id);
-
-       /* Set GTA WQE CTRL */
-       memcpy(gta_wqe_ctrl.stc_ix, send_wqe_ctrl->stc_ix, sizeof(send_wqe_ctrl->stc_ix));
-       gta_wqe_ctrl.op_dirix = cpu_to_be32(gta_opcode << 28);
-
-       /* Set GTA match WQE DATA */
-       if (has_match) {
-               if (send_wqe_match_data)
-                       memcpy(&gta_wqe_data0, send_wqe_match_data, sizeof(gta_wqe_data0));
-               else
-                       hws_send_wqe_set_tag(&gta_wqe_data0, send_wqe_match_tag, is_jumbo);
-
-               gta_wqe_data0.rsvd1_definer = cpu_to_be32(send_attr->match_definer_id << 8);
-               attr.gta_data_0 = (u8 *)&gta_wqe_data0;
-       }
-
-       /* Set GTA range WQE DATA */
-       if (has_range) {
-               if (send_wqe_range_data)
-                       memcpy(&gta_wqe_data1, send_wqe_range_data, sizeof(gta_wqe_data1));
-               else
-                       hws_send_wqe_set_tag(&gta_wqe_data1, send_wqe_range_tag, false);
-
-               gta_wqe_data1.rsvd1_definer = cpu_to_be32(send_attr->range_definer_id << 8);
-               attr.gta_data_1 = (u8 *)&gta_wqe_data1;
-       }
-
-       attr.pdn = pd_num;
-       attr.wqe_ctrl = (u8 *)&wqe_ctrl;
-       attr.gta_ctrl = (u8 *)&gta_wqe_ctrl;
-
-send_wqe:
-       ret = mlx5hws_cmd_generate_wqe(mdev, &attr, &cqe);
-       if (ret) {
-               mlx5_core_err(mdev, "Failed to write WQE using command");
-               return ret;
-       }
-
-       if ((get_cqe_opcode(&cqe) == MLX5_CQE_REQ) &&
-           (be32_to_cpu(cqe.byte_cnt) >> 31 == 0)) {
-               *send_attr->used_id = send_attr->id;
-               return 0;
-       }
-
-       /* Retry if rule failed */
-       if (send_attr->retry_id) {
-               wqe_ctrl.imm = cpu_to_be32(send_attr->retry_id);
-               send_attr->id = send_attr->retry_id;
-               send_attr->retry_id = 0;
-               goto send_wqe;
-       }
-
-       return -1;
-}
-
-void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
-                         struct mlx5hws_send_engine *queue,
-                         struct mlx5hws_send_ste_attr *ste_attr)
-{
-       struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr;
-       struct mlx5hws_rule *rule = send_attr->rule;
-       struct mlx5_core_dev *mdev;
-       u16 queue_id;
-       u32 pdn;
-       int ret;
-
-       queue_id = queue - ctx->send_queue;
-       mdev = ctx->mdev;
-       pdn = ctx->pd_num;
-
-       /* Writing through FW can't HW fence, therefore we drain the queue */
-       if (send_attr->fence)
-               mlx5hws_send_queue_action(ctx,
-                                         queue_id,
-                                         MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
-
-       if (ste_attr->rtc_1) {
-               send_attr->id = ste_attr->rtc_1;
-               send_attr->used_id = ste_attr->used_id_rtc_1;
-               send_attr->retry_id = ste_attr->retry_rtc_1;
-               ret = hws_send_wqe_fw(mdev, pdn, send_attr,
-                                     ste_attr->wqe_ctrl,
-                                     ste_attr->wqe_data,
-                                     ste_attr->wqe_tag,
-                                     ste_attr->range_wqe_data,
-                                     ste_attr->range_wqe_tag,
-                                     ste_attr->wqe_tag_is_jumbo,
-                                     ste_attr->gta_opcode);
-               if (ret)
-                       goto fail_rule;
-       }
-
-       if (ste_attr->rtc_0) {
-               send_attr->id = ste_attr->rtc_0;
-               send_attr->used_id = ste_attr->used_id_rtc_0;
-               send_attr->retry_id = ste_attr->retry_rtc_0;
-               ret = hws_send_wqe_fw(mdev, pdn, send_attr,
-                                     ste_attr->wqe_ctrl,
-                                     ste_attr->wqe_data,
-                                     ste_attr->wqe_tag,
-                                     ste_attr->range_wqe_data,
-                                     ste_attr->range_wqe_tag,
-                                     ste_attr->wqe_tag_is_jumbo,
-                                     ste_attr->gta_opcode);
-               if (ret)
-                       goto fail_rule;
-       }
-
-       /* Increase the status, this only works on good flow as the enum
-        * is arrange it away creating -> created -> deleting -> deleted
-        */
-       if (likely(rule))
-               rule->status++;
-
-       mlx5hws_send_engine_gen_comp(queue, send_attr->user_data, MLX5HWS_FLOW_OP_SUCCESS);
-
-       return;
-
-fail_rule:
-       if (likely(rule))
-               rule->status = !rule->rtc_0 && !rule->rtc_1 ?
-                       MLX5HWS_RULE_STATUS_FAILED : MLX5HWS_RULE_STATUS_FAILING;
-
-       mlx5hws_send_engine_gen_comp(queue, send_attr->user_data, MLX5HWS_FLOW_OP_ERROR);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h
deleted file mode 100644 (file)
index b50825d..0000000
+++ /dev/null
@@ -1,270 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#ifndef MLX5HWS_SEND_H_
-#define MLX5HWS_SEND_H_
-
-/* As a single operation requires at least two WQEBBS.
- * This means a maximum of 16 such operations per rule.
- */
-#define MAX_WQES_PER_RULE 32
-
-enum mlx5hws_wqe_opcode {
-       MLX5HWS_WQE_OPCODE_TBL_ACCESS = 0x2c,
-};
-
-enum mlx5hws_wqe_opmod {
-       MLX5HWS_WQE_OPMOD_GTA_STE = 0,
-       MLX5HWS_WQE_OPMOD_GTA_MOD_ARG = 1,
-};
-
-enum mlx5hws_wqe_gta_opcode {
-       MLX5HWS_WQE_GTA_OP_ACTIVATE = 0,
-       MLX5HWS_WQE_GTA_OP_DEACTIVATE = 1,
-};
-
-enum mlx5hws_wqe_gta_opmod {
-       MLX5HWS_WQE_GTA_OPMOD_STE = 0,
-       MLX5HWS_WQE_GTA_OPMOD_MOD_ARG = 1,
-};
-
-enum mlx5hws_wqe_gta_sz {
-       MLX5HWS_WQE_SZ_GTA_CTRL = 48,
-       MLX5HWS_WQE_SZ_GTA_DATA = 64,
-};
-
-/* WQE Control segment. */
-struct mlx5hws_wqe_ctrl_seg {
-       __be32 opmod_idx_opcode;
-       __be32 qpn_ds;
-       __be32 flags;
-       __be32 imm;
-};
-
-struct mlx5hws_wqe_gta_ctrl_seg {
-       __be32 op_dirix;
-       __be32 stc_ix[5];
-       __be32 rsvd0[6];
-};
-
-struct mlx5hws_wqe_gta_data_seg_ste {
-       __be32 rsvd0_ctr_id;
-       __be32 rsvd1_definer;
-       __be32 rsvd2[3];
-       union {
-               struct {
-               __be32 action[3];
-               __be32 tag[8];
-               };
-               __be32 jumbo[11];
-       };
-};
-
-struct mlx5hws_wqe_gta_data_seg_arg {
-       __be32 action_args[8];
-};
-
-struct mlx5hws_wqe_gta {
-       struct mlx5hws_wqe_gta_ctrl_seg gta_ctrl;
-       union {
-               struct mlx5hws_wqe_gta_data_seg_ste seg_ste;
-               struct mlx5hws_wqe_gta_data_seg_arg seg_arg;
-       };
-};
-
-struct mlx5hws_send_ring_cq {
-       struct mlx5_core_dev *mdev;
-       struct mlx5_cqwq wq;
-       struct mlx5_wq_ctrl wq_ctrl;
-       struct mlx5_core_cq mcq;
-       u16 poll_wqe;
-};
-
-struct mlx5hws_send_ring_priv {
-       struct mlx5hws_rule *rule;
-       void *user_data;
-       u32 num_wqebbs;
-       u32 id;
-       u32 retry_id;
-       u32 *used_id;
-};
-
-struct mlx5hws_send_ring_dep_wqe {
-       struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl;
-       struct mlx5hws_wqe_gta_data_seg_ste wqe_data;
-       struct mlx5hws_rule *rule;
-       u32 rtc_0;
-       u32 rtc_1;
-       u32 retry_rtc_0;
-       u32 retry_rtc_1;
-       u32 direct_index;
-       void *user_data;
-};
-
-struct mlx5hws_send_ring_sq {
-       struct mlx5_core_dev *mdev;
-       u16 cur_post;
-       u16 buf_mask;
-       struct mlx5hws_send_ring_priv *wr_priv;
-       unsigned int last_idx;
-       struct mlx5hws_send_ring_dep_wqe *dep_wqe;
-       unsigned int head_dep_idx;
-       unsigned int tail_dep_idx;
-       u32 sqn;
-       struct mlx5_wq_cyc wq;
-       struct mlx5_wq_ctrl wq_ctrl;
-       void __iomem *uar_map;
-};
-
-struct mlx5hws_send_ring {
-       struct mlx5hws_send_ring_cq send_cq;
-       struct mlx5hws_send_ring_sq send_sq;
-};
-
-struct mlx5hws_completed_poll_entry {
-       void *user_data;
-       enum mlx5hws_flow_op_status status;
-};
-
-struct mlx5hws_completed_poll {
-       struct mlx5hws_completed_poll_entry *entries;
-       u16 ci;
-       u16 pi;
-       u16 mask;
-};
-
-struct mlx5hws_send_engine {
-       struct mlx5hws_send_ring send_ring;
-       struct mlx5_uars_page *uar; /* Uar is shared between rings of a queue */
-       struct mlx5hws_completed_poll completed;
-       u16 used_entries;
-       u16 num_entries;
-       bool err;
-       struct mutex lock; /* Protects the send engine */
-};
-
-struct mlx5hws_send_engine_post_ctrl {
-       struct mlx5hws_send_engine *queue;
-       struct mlx5hws_send_ring *send_ring;
-       size_t num_wqebbs;
-};
-
-struct mlx5hws_send_engine_post_attr {
-       u8 opcode;
-       u8 opmod;
-       u8 notify_hw;
-       u8 fence;
-       u8 match_definer_id;
-       u8 range_definer_id;
-       size_t len;
-       struct mlx5hws_rule *rule;
-       u32 id;
-       u32 retry_id;
-       u32 *used_id;
-       void *user_data;
-};
-
-struct mlx5hws_send_ste_attr {
-       u32 rtc_0;
-       u32 rtc_1;
-       u32 retry_rtc_0;
-       u32 retry_rtc_1;
-       u32 *used_id_rtc_0;
-       u32 *used_id_rtc_1;
-       bool wqe_tag_is_jumbo;
-       u8 gta_opcode;
-       u32 direct_index;
-       struct mlx5hws_send_engine_post_attr send_attr;
-       struct mlx5hws_rule_match_tag *wqe_tag;
-       struct mlx5hws_rule_match_tag *range_wqe_tag;
-       struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
-       struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
-       struct mlx5hws_wqe_gta_data_seg_ste *range_wqe_data;
-};
-
-struct mlx5hws_send_ring_dep_wqe *
-mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue);
-
-void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue);
-
-void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue);
-
-void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue);
-
-int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
-                           struct mlx5hws_send_engine *queue,
-                           u16 queue_size);
-
-void mlx5hws_send_queues_close(struct mlx5hws_context *ctx);
-
-int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
-                            u16 queues,
-                            u16 queue_size);
-
-int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
-                             u16 queue_id,
-                             u32 actions);
-
-int mlx5hws_send_test(struct mlx5hws_context *ctx,
-                     u16 queues,
-                     u16 queue_size);
-
-struct mlx5hws_send_engine_post_ctrl
-mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue);
-
-void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
-                                     char **buf, size_t *len);
-
-void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
-                                 struct mlx5hws_send_engine_post_attr *attr);
-
-void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
-                     struct mlx5hws_send_ste_attr *ste_attr);
-
-void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
-                         struct mlx5hws_send_engine *queue,
-                         struct mlx5hws_send_ste_attr *ste_attr);
-
-void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue);
-
-static inline bool mlx5hws_send_engine_empty(struct mlx5hws_send_engine *queue)
-{
-       struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
-       struct mlx5hws_send_ring_cq *send_cq = &queue->send_ring.send_cq;
-
-       return ((send_sq->cur_post & send_sq->buf_mask) == send_cq->poll_wqe);
-}
-
-static inline bool mlx5hws_send_engine_full(struct mlx5hws_send_engine *queue)
-{
-       return queue->used_entries >= queue->num_entries;
-}
-
-static inline void mlx5hws_send_engine_inc_rule(struct mlx5hws_send_engine *queue)
-{
-       queue->used_entries++;
-}
-
-static inline void mlx5hws_send_engine_dec_rule(struct mlx5hws_send_engine *queue)
-{
-       queue->used_entries--;
-}
-
-static inline void mlx5hws_send_engine_gen_comp(struct mlx5hws_send_engine *queue,
-                                               void *user_data,
-                                               int comp_status)
-{
-       struct mlx5hws_completed_poll *comp = &queue->completed;
-
-       comp->entries[comp->pi].status = comp_status;
-       comp->entries[comp->pi].user_data = user_data;
-
-       comp->pi = (comp->pi + 1) & comp->mask;
-}
-
-static inline bool mlx5hws_send_engine_err(struct mlx5hws_send_engine *queue)
-{
-       return queue->err;
-}
-
-#endif /* MLX5HWS_SEND_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c
deleted file mode 100644 (file)
index 8c063a8..0000000
+++ /dev/null
@@ -1,493 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#include "mlx5hws_internal.h"
-
-u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl)
-{
-       return tbl->ft_id;
-}
-
-static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl,
-                                       struct mlx5hws_cmd_ft_create_attr *ft_attr)
-{
-       ft_attr->type = tbl->fw_ft_type;
-       if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
-               ft_attr->level = tbl->ctx->caps->fdb_ft.max_level - 1;
-       else
-               ft_attr->level = tbl->ctx->caps->nic_ft.max_level - 1;
-       ft_attr->rtc_valid = true;
-}
-
-static void hws_table_set_cap_attr(struct mlx5hws_table *tbl,
-                                  struct mlx5hws_cmd_ft_create_attr *ft_attr)
-{
-       /* Enabling reformat_en or decap_en for the first flow table
-        * must be done when all VFs are down.
-        * However, HWS doesn't know when it is required to create the first FT.
-        * On the other hand, HWS doesn't use all these FT capabilities at all
-        * (the API doesn't even provide a way to specify these flags), so we'll
-        * just set these caps on all the flow tables.
-        * If HCA_CAP.fdb_dynamic_tunnel is set, this constraint is N/A.
-        */
-       if (!MLX5_CAP_ESW_FLOWTABLE(tbl->ctx->mdev, fdb_dynamic_tunnel)) {
-               ft_attr->reformat_en = true;
-               ft_attr->decap_en = true;
-       }
-}
-
-static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
-{
-       struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
-       struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
-       struct mlx5hws_cmd_forward_tbl *default_miss;
-       struct mlx5hws_cmd_set_fte_dest dest = {0};
-       struct mlx5hws_context *ctx = tbl->ctx;
-       u8 tbl_type = tbl->type;
-
-       if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
-               return 0;
-
-       if (ctx->common_res[tbl_type].default_miss) {
-               ctx->common_res[tbl_type].default_miss->refcount++;
-               return 0;
-       }
-
-       ft_attr.type = tbl->fw_ft_type;
-       ft_attr.level = tbl->ctx->caps->fdb_ft.max_level; /* The last level */
-       ft_attr.rtc_valid = false;
-
-       dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-       dest.destination_id = ctx->caps->eswitch_manager_vport_number;
-
-       fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-       fte_attr.dests_num = 1;
-       fte_attr.dests = &dest;
-
-       default_miss = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
-       if (!default_miss) {
-               mlx5hws_err(ctx, "Failed to default miss table type: 0x%x\n", tbl_type);
-               return -EINVAL;
-       }
-
-       /* ctx->ctrl_lock must be held here */
-       ctx->common_res[tbl_type].default_miss = default_miss;
-       ctx->common_res[tbl_type].default_miss->refcount++;
-
-       return 0;
-}
-
-/* Called under ctx->ctrl_lock */
-static void hws_table_down_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
-{
-       struct mlx5hws_cmd_forward_tbl *default_miss;
-       struct mlx5hws_context *ctx = tbl->ctx;
-       u8 tbl_type = tbl->type;
-
-       if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
-               return;
-
-       default_miss = ctx->common_res[tbl_type].default_miss;
-       if (--default_miss->refcount)
-               return;
-
-       mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, default_miss);
-       ctx->common_res[tbl_type].default_miss = NULL;
-}
-
-static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32 ft_id)
-{
-       struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
-       int ret;
-
-       if (unlikely(tbl->type != MLX5HWS_TABLE_TYPE_FDB))
-               pr_warn("HWS: invalid table type %d\n", tbl->type);
-
-       mlx5hws_cmd_set_attr_connect_miss_tbl(tbl->ctx,
-                                             tbl->fw_ft_type,
-                                             tbl->type,
-                                             &ft_attr);
-
-       ret = mlx5hws_cmd_flow_table_modify(tbl->ctx->mdev, &ft_attr, ft_id);
-       if (ret) {
-               mlx5hws_err(tbl->ctx, "Failed to connect FT to default FDB FT\n");
-               return ret;
-       }
-
-       return 0;
-}
-
-int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
-                                   struct mlx5hws_table *tbl,
-                                   u32 *ft_id)
-{
-       struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
-       int ret;
-
-       hws_table_init_next_ft_attr(tbl, &ft_attr);
-       hws_table_set_cap_attr(tbl, &ft_attr);
-
-       ret = mlx5hws_cmd_flow_table_create(mdev, &ft_attr, ft_id);
-       if (ret) {
-               mlx5hws_err(tbl->ctx, "Failed creating default ft\n");
-               return ret;
-       }
-
-       if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
-               /* Take/create ref over the default miss */
-               ret = hws_table_up_default_fdb_miss_tbl(tbl);
-               if (ret) {
-                       mlx5hws_err(tbl->ctx, "Failed to get default fdb miss\n");
-                       goto free_ft_obj;
-               }
-               ret = hws_table_connect_to_default_miss_tbl(tbl, *ft_id);
-               if (ret) {
-                       mlx5hws_err(tbl->ctx, "Failed connecting to default miss tbl\n");
-                       goto down_miss_tbl;
-               }
-       }
-
-       return 0;
-
-down_miss_tbl:
-       hws_table_down_default_fdb_miss_tbl(tbl);
-free_ft_obj:
-       mlx5hws_cmd_flow_table_destroy(mdev, ft_attr.type, *ft_id);
-       return ret;
-}
-
-void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl,
-                                     u32 ft_id)
-{
-       mlx5hws_cmd_flow_table_destroy(tbl->ctx->mdev, tbl->fw_ft_type, ft_id);
-       hws_table_down_default_fdb_miss_tbl(tbl);
-}
-
-static int hws_table_init_check_hws_support(struct mlx5hws_context *ctx,
-                                           struct mlx5hws_table *tbl)
-{
-       if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) {
-               mlx5hws_err(ctx, "HWS not supported, cannot create mlx5hws_table\n");
-               return -EOPNOTSUPP;
-       }
-
-       return 0;
-}
-
-static int hws_table_init(struct mlx5hws_table *tbl)
-{
-       struct mlx5hws_context *ctx = tbl->ctx;
-       int ret;
-
-       ret = hws_table_init_check_hws_support(ctx, tbl);
-       if (ret)
-               return ret;
-
-       if (mlx5hws_table_get_fw_ft_type(tbl->type, (u8 *)&tbl->fw_ft_type)) {
-               pr_warn("HWS: invalid table type %d\n", tbl->type);
-               return -EOPNOTSUPP;
-       }
-
-       mutex_lock(&ctx->ctrl_lock);
-       ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &tbl->ft_id);
-       if (ret) {
-               mlx5hws_err(tbl->ctx, "Failed to create flow table object\n");
-               mutex_unlock(&ctx->ctrl_lock);
-               return ret;
-       }
-
-       ret = mlx5hws_action_get_default_stc(ctx, tbl->type);
-       if (ret)
-               goto tbl_destroy;
-
-       INIT_LIST_HEAD(&tbl->matchers_list);
-       INIT_LIST_HEAD(&tbl->default_miss.head);
-
-       mutex_unlock(&ctx->ctrl_lock);
-
-       return 0;
-
-tbl_destroy:
-       mlx5hws_table_destroy_default_ft(tbl, tbl->ft_id);
-       mutex_unlock(&ctx->ctrl_lock);
-       return ret;
-}
-
-static void hws_table_uninit(struct mlx5hws_table *tbl)
-{
-       mutex_lock(&tbl->ctx->ctrl_lock);
-       mlx5hws_action_put_default_stc(tbl->ctx, tbl->type);
-       mlx5hws_table_destroy_default_ft(tbl, tbl->ft_id);
-       mutex_unlock(&tbl->ctx->ctrl_lock);
-}
-
-struct mlx5hws_table *mlx5hws_table_create(struct mlx5hws_context *ctx,
-                                          struct mlx5hws_table_attr *attr)
-{
-       struct mlx5hws_table *tbl;
-       int ret;
-
-       if (attr->type > MLX5HWS_TABLE_TYPE_FDB) {
-               mlx5hws_err(ctx, "Invalid table type %d\n", attr->type);
-               return NULL;
-       }
-
-       tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
-       if (!tbl)
-               return NULL;
-
-       tbl->ctx = ctx;
-       tbl->type = attr->type;
-       tbl->level = attr->level;
-
-       ret = hws_table_init(tbl);
-       if (ret) {
-               mlx5hws_err(ctx, "Failed to initialise table\n");
-               goto free_tbl;
-       }
-
-       mutex_lock(&ctx->ctrl_lock);
-       list_add(&tbl->tbl_list_node, &ctx->tbl_list);
-       mutex_unlock(&ctx->ctrl_lock);
-
-       return tbl;
-
-free_tbl:
-       kfree(tbl);
-       return NULL;
-}
-
-int mlx5hws_table_destroy(struct mlx5hws_table *tbl)
-{
-       struct mlx5hws_context *ctx = tbl->ctx;
-       int ret;
-
-       mutex_lock(&ctx->ctrl_lock);
-       if (!list_empty(&tbl->matchers_list)) {
-               mlx5hws_err(tbl->ctx, "Cannot destroy table containing matchers\n");
-               ret = -EBUSY;
-               goto unlock_err;
-       }
-
-       if (!list_empty(&tbl->default_miss.head)) {
-               mlx5hws_err(tbl->ctx, "Cannot destroy table pointed by default miss\n");
-               ret = -EBUSY;
-               goto unlock_err;
-       }
-
-       list_del_init(&tbl->tbl_list_node);
-       mutex_unlock(&ctx->ctrl_lock);
-
-       hws_table_uninit(tbl);
-       kfree(tbl);
-
-       return 0;
-
-unlock_err:
-       mutex_unlock(&ctx->ctrl_lock);
-       return ret;
-}
-
-static u32 hws_table_get_last_ft(struct mlx5hws_table *tbl)
-{
-       struct mlx5hws_matcher *matcher;
-
-       if (list_empty(&tbl->matchers_list))
-               return tbl->ft_id;
-
-       matcher = list_last_entry(&tbl->matchers_list, struct mlx5hws_matcher, list_node);
-       return matcher->end_ft_id;
-}
-
-int mlx5hws_table_ft_set_default_next_ft(struct mlx5hws_table *tbl, u32 ft_id)
-{
-       struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
-       int ret;
-
-       /* Due to FW limitation, resetting the flow table to default action will
-        * disconnect RTC when ignore_flow_level_rtc_valid is not supported.
-        */
-       if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid)
-               return 0;
-
-       if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
-               return hws_table_connect_to_default_miss_tbl(tbl, ft_id);
-
-       ft_attr.type = tbl->fw_ft_type;
-       ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
-       ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT;
-
-       ret = mlx5hws_cmd_flow_table_modify(tbl->ctx->mdev, &ft_attr, ft_id);
-       if (ret) {
-               mlx5hws_err(tbl->ctx, "Failed to set FT default miss action\n");
-               return ret;
-       }
-
-       return 0;
-}
-
-int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
-                                 u32 ft_id,
-                                 u32 fw_ft_type,
-                                 u32 rtc_0_id,
-                                 u32 rtc_1_id)
-{
-       struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
-
-       ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID;
-       ft_attr.type = fw_ft_type;
-       ft_attr.rtc_id_0 = rtc_0_id;
-       ft_attr.rtc_id_1 = rtc_1_id;
-
-       return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id);
-}
-
-static int hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
-                                   u32 ft_id,
-                                   u32 fw_ft_type,
-                                   u32 next_ft_id)
-{
-       struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
-
-       ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
-       ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
-       ft_attr.type = fw_ft_type;
-       ft_attr.table_miss_id = next_ft_id;
-
-       return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id);
-}
-
-int mlx5hws_table_update_connected_miss_tables(struct mlx5hws_table *dst_tbl)
-{
-       struct mlx5hws_table *src_tbl;
-       int ret;
-
-       if (list_empty(&dst_tbl->default_miss.head))
-               return 0;
-
-       list_for_each_entry(src_tbl, &dst_tbl->default_miss.head, default_miss.next) {
-               ret = mlx5hws_table_connect_to_miss_table(src_tbl, dst_tbl);
-               if (ret) {
-                       mlx5hws_err(dst_tbl->ctx,
-                                   "Failed to update source miss table, unexpected behavior\n");
-                       return ret;
-               }
-       }
-
-       return 0;
-}
-
-int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl,
-                                       struct mlx5hws_table *dst_tbl)
-{
-       struct mlx5hws_matcher *matcher;
-       u32 last_ft_id;
-       int ret;
-
-       last_ft_id = hws_table_get_last_ft(src_tbl);
-
-       if (dst_tbl) {
-               if (list_empty(&dst_tbl->matchers_list)) {
-                       /* Connect src_tbl last_ft to dst_tbl start anchor */
-                       ret = hws_table_ft_set_next_ft(src_tbl->ctx,
-                                                      last_ft_id,
-                                                      src_tbl->fw_ft_type,
-                                                      dst_tbl->ft_id);
-                       if (ret)
-                               return ret;
-
-                       /* Reset last_ft RTC to default RTC */
-                       ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
-                                                           last_ft_id,
-                                                           src_tbl->fw_ft_type,
-                                                           0, 0);
-                       if (ret)
-                               return ret;
-               } else {
-                       /* Connect src_tbl last_ft to first matcher RTC */
-                       matcher = list_first_entry(&dst_tbl->matchers_list,
-                                                  struct mlx5hws_matcher,
-                                                  list_node);
-                       ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
-                                                           last_ft_id,
-                                                           src_tbl->fw_ft_type,
-                                                           matcher->match_ste.rtc_0_id,
-                                                           matcher->match_ste.rtc_1_id);
-                       if (ret)
-                               return ret;
-
-                       /* Reset next miss FT to default */
-                       ret = mlx5hws_table_ft_set_default_next_ft(src_tbl, last_ft_id);
-                       if (ret)
-                               return ret;
-               }
-       } else {
-               /* Reset next miss FT to default */
-               ret = mlx5hws_table_ft_set_default_next_ft(src_tbl, last_ft_id);
-               if (ret)
-                       return ret;
-
-               /* Reset last_ft RTC to default RTC */
-               ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
-                                                   last_ft_id,
-                                                   src_tbl->fw_ft_type,
-                                                   0, 0);
-               if (ret)
-                       return ret;
-       }
-
-       src_tbl->default_miss.miss_tbl = dst_tbl;
-
-       return 0;
-}
-
-static int hws_table_set_default_miss_not_valid(struct mlx5hws_table *tbl,
-                                               struct mlx5hws_table *miss_tbl)
-{
-       if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid) {
-               mlx5hws_err(tbl->ctx, "Default miss table is not supported\n");
-               return -EOPNOTSUPP;
-       }
-
-       if ((miss_tbl && miss_tbl->type != tbl->type)) {
-               mlx5hws_err(tbl->ctx, "Invalid arguments\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
-                                  struct mlx5hws_table *miss_tbl)
-{
-       struct mlx5hws_context *ctx = tbl->ctx;
-       struct mlx5hws_table *old_miss_tbl;
-       int ret;
-
-       ret = hws_table_set_default_miss_not_valid(tbl, miss_tbl);
-       if (ret)
-               return ret;
-
-       mutex_lock(&ctx->ctrl_lock);
-
-       old_miss_tbl = tbl->default_miss.miss_tbl;
-       ret = mlx5hws_table_connect_to_miss_table(tbl, miss_tbl);
-       if (ret)
-               goto out;
-
-       if (old_miss_tbl)
-               list_del_init(&tbl->default_miss.next);
-
-       old_miss_tbl = tbl->default_miss.miss_tbl;
-       if (old_miss_tbl)
-               list_del_init(&old_miss_tbl->default_miss.head);
-
-       if (miss_tbl)
-               list_add(&tbl->default_miss.next, &miss_tbl->default_miss.head);
-
-       mutex_unlock(&ctx->ctrl_lock);
-       return 0;
-out:
-       mutex_unlock(&ctx->ctrl_lock);
-       return ret;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h
deleted file mode 100644 (file)
index dd50420..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#ifndef MLX5HWS_TABLE_H_
-#define MLX5HWS_TABLE_H_
-
-struct mlx5hws_default_miss {
-       /* My miss table */
-       struct mlx5hws_table *miss_tbl;
-       struct list_head next;
-       /* Tables missing to my table */
-       struct list_head head;
-};
-
-struct mlx5hws_table {
-       struct mlx5hws_context *ctx;
-       u32 ft_id;
-       enum mlx5hws_table_type type;
-       u32 fw_ft_type;
-       u32 level;
-       struct list_head matchers_list;
-       struct list_head tbl_list_node;
-       struct mlx5hws_default_miss default_miss;
-};
-
-static inline
-u32 mlx5hws_table_get_fw_ft_type(enum mlx5hws_table_type type,
-                                u8 *ret_type)
-{
-       if (type != MLX5HWS_TABLE_TYPE_FDB)
-               return -EOPNOTSUPP;
-
-       *ret_type = FS_FT_FDB;
-
-       return 0;
-}
-
-static inline
-u32 mlx5hws_table_get_res_fw_ft_type(enum mlx5hws_table_type tbl_type,
-                                    bool is_mirror)
-{
-       if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
-               return is_mirror ? FS_FT_FDB_TX : FS_FT_FDB_RX;
-
-       return 0;
-}
-
-int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
-                                   struct mlx5hws_table *tbl,
-                                   u32 *ft_id);
-
-void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl,
-                                     u32 ft_id);
-
-int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl,
-                                       struct mlx5hws_table *dst_tbl);
-
-int mlx5hws_table_update_connected_miss_tables(struct mlx5hws_table *dst_tbl);
-
-int mlx5hws_table_ft_set_default_next_ft(struct mlx5hws_table *tbl, u32 ft_id);
-
-int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
-                                 u32 ft_id,
-                                 u32 fw_ft_type,
-                                 u32 rtc_0_id,
-                                 u32 rtc_1_id);
-
-#endif /* MLX5HWS_TABLE_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c
deleted file mode 100644 (file)
index faf4242..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#include "mlx5hws_internal.h"
-
-int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx)
-{
-       int ret;
-
-       if (!ctx->caps->eswitch_manager)
-               return 0;
-
-       xa_init(&ctx->vports.vport_gvmi_xa);
-
-       /* Set gvmi for eswitch manager and uplink vports only. Rest of the vports
-        * (vport 0 of other function, VFs and SFs) will be queried dynamically.
-        */
-
-       ret = mlx5hws_cmd_query_gvmi(ctx->mdev, false, 0, &ctx->vports.esw_manager_gvmi);
-       if (ret)
-               return ret;
-
-       ctx->vports.uplink_gvmi = 0;
-       return 0;
-}
-
-void mlx5hws_vport_uninit_vports(struct mlx5hws_context *ctx)
-{
-       if (ctx->caps->eswitch_manager)
-               xa_destroy(&ctx->vports.vport_gvmi_xa);
-}
-
-static int hws_vport_add_gvmi(struct mlx5hws_context *ctx, u16 vport)
-{
-       u16 vport_gvmi;
-       int ret;
-
-       ret = mlx5hws_cmd_query_gvmi(ctx->mdev, true, vport, &vport_gvmi);
-       if (ret)
-               return -EINVAL;
-
-       ret = xa_insert(&ctx->vports.vport_gvmi_xa, vport,
-                       xa_mk_value(vport_gvmi), GFP_KERNEL);
-       if (ret)
-               mlx5hws_dbg(ctx, "Couldn't insert new vport gvmi into xarray (%d)\n", ret);
-
-       return ret;
-}
-
-static bool hws_vport_is_esw_mgr_vport(struct mlx5hws_context *ctx, u16 vport)
-{
-       return ctx->caps->is_ecpf ? vport == MLX5_VPORT_ECPF :
-                                   vport == MLX5_VPORT_PF;
-}
-
-int mlx5hws_vport_get_gvmi(struct mlx5hws_context *ctx, u16 vport, u16 *vport_gvmi)
-{
-       void *entry;
-       int ret;
-
-       if (!ctx->caps->eswitch_manager)
-               return -EINVAL;
-
-       if (hws_vport_is_esw_mgr_vport(ctx, vport)) {
-               *vport_gvmi = ctx->vports.esw_manager_gvmi;
-               return 0;
-       }
-
-       if (vport == MLX5_VPORT_UPLINK) {
-               *vport_gvmi = ctx->vports.uplink_gvmi;
-               return 0;
-       }
-
-load_entry:
-       entry = xa_load(&ctx->vports.vport_gvmi_xa, vport);
-
-       if (!xa_is_value(entry)) {
-               ret = hws_vport_add_gvmi(ctx, vport);
-               if (ret && ret != -EBUSY)
-                       return ret;
-               goto load_entry;
-       }
-
-       *vport_gvmi = (u16)xa_to_value(entry);
-       return 0;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h
deleted file mode 100644 (file)
index 0912fc1..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#ifndef MLX5HWS_VPORT_H_
-#define MLX5HWS_VPORT_H_
-
-int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx);
-
-void mlx5hws_vport_uninit_vports(struct mlx5hws_context *ctx);
-
-int mlx5hws_vport_get_gvmi(struct mlx5hws_context *ctx, u16 vport, u16 *vport_gvmi);
-
-#endif /* MLX5HWS_VPORT_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
new file mode 100644 (file)
index 0000000..06db5e4
--- /dev/null
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_data_size_to_arg_log_size(u16 data_size)
+{
+       /* Return the roundup of log2(data_size) */
+       if (data_size <= MLX5HWS_ARG_DATA_SIZE)
+               return MLX5HWS_ARG_CHUNK_SIZE_1;
+       if (data_size <= MLX5HWS_ARG_DATA_SIZE * 2)
+               return MLX5HWS_ARG_CHUNK_SIZE_2;
+       if (data_size <= MLX5HWS_ARG_DATA_SIZE * 4)
+               return MLX5HWS_ARG_CHUNK_SIZE_3;
+       if (data_size <= MLX5HWS_ARG_DATA_SIZE * 8)
+               return MLX5HWS_ARG_CHUNK_SIZE_4;
+
+       return MLX5HWS_ARG_CHUNK_SIZE_MAX;
+}
+
+u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size)
+{
+       return BIT(mlx5hws_arg_data_size_to_arg_log_size(data_size));
+}
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_get_arg_log_size(u16 num_of_actions)
+{
+       return mlx5hws_arg_data_size_to_arg_log_size(num_of_actions *
+                                                   MLX5HWS_MODIFY_ACTION_SIZE);
+}
+
+u32 mlx5hws_arg_get_arg_size(u16 num_of_actions)
+{
+       return BIT(mlx5hws_arg_get_arg_log_size(num_of_actions));
+}
+
+bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions)
+{
+       u16 i, field;
+       u8 action_id;
+
+       for (i = 0; i < num_of_actions; i++) {
+               action_id = MLX5_GET(set_action_in, &actions[i], action_type);
+
+               switch (action_id) {
+               case MLX5_MODIFICATION_TYPE_NOP:
+                       field = MLX5_MODI_OUT_NONE;
+                       break;
+
+               case MLX5_MODIFICATION_TYPE_SET:
+               case MLX5_MODIFICATION_TYPE_ADD:
+                       field = MLX5_GET(set_action_in, &actions[i], field);
+                       break;
+
+               case MLX5_MODIFICATION_TYPE_COPY:
+               case MLX5_MODIFICATION_TYPE_ADD_FIELD:
+                       field = MLX5_GET(copy_action_in, &actions[i], dst_field);
+                       break;
+
+               default:
+                       /* Insert/Remove/Unknown actions require reparse */
+                       return true;
+               }
+
+               /* Below fields can change packet structure require a reparse */
+               if (field == MLX5_MODI_OUT_ETHERTYPE ||
+                   field == MLX5_MODI_OUT_IPV6_NEXT_HDR)
+                       return true;
+       }
+
+       return false;
+}
+
+/* Cache and cache element handling */
+int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache)
+{
+       struct mlx5hws_pattern_cache *new_cache;
+
+       new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL);
+       if (!new_cache)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&new_cache->ptrn_list);
+       mutex_init(&new_cache->lock);
+
+       *cache = new_cache;
+
+       return 0;
+}
+
+void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache)
+{
+       mutex_destroy(&cache->lock);
+       kfree(cache);
+}
+
+static bool mlx5hws_pat_compare_pattern(int cur_num_of_actions,
+                                       __be64 cur_actions[],
+                                       int num_of_actions,
+                                       __be64 actions[])
+{
+       int i;
+
+       if (cur_num_of_actions != num_of_actions)
+               return false;
+
+       for (i = 0; i < num_of_actions; i++) {
+               u8 action_id =
+                       MLX5_GET(set_action_in, &actions[i], action_type);
+
+               if (action_id == MLX5_MODIFICATION_TYPE_COPY ||
+                   action_id == MLX5_MODIFICATION_TYPE_ADD_FIELD) {
+                       if (actions[i] != cur_actions[i])
+                               return false;
+               } else {
+                       /* Compare just the control, not the values */
+                       if ((__force __be32)actions[i] !=
+                           (__force __be32)cur_actions[i])
+                               return false;
+               }
+       }
+
+       return true;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_find_cached_pattern(struct mlx5hws_pattern_cache *cache,
+                               u16 num_of_actions,
+                               __be64 *actions)
+{
+       struct mlx5hws_pattern_cache_item *cached_pat = NULL;
+
+       list_for_each_entry(cached_pat, &cache->ptrn_list, ptrn_list_node) {
+               if (mlx5hws_pat_compare_pattern(cached_pat->mh_data.num_of_actions,
+                                               (__be64 *)cached_pat->mh_data.data,
+                                               num_of_actions,
+                                               actions))
+                       return cached_pat;
+       }
+
+       return NULL;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_get_existing_cached_pattern(struct mlx5hws_pattern_cache *cache,
+                                       u16 num_of_actions,
+                                       __be64 *actions)
+{
+       struct mlx5hws_pattern_cache_item *cached_pattern;
+
+       cached_pattern = mlx5hws_pat_find_cached_pattern(cache, num_of_actions, actions);
+       if (cached_pattern) {
+               /* LRU: move it to be first in the list */
+               list_del_init(&cached_pattern->ptrn_list_node);
+               list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
+               cached_pattern->refcount++;
+       }
+
+       return cached_pattern;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_add_pattern_to_cache(struct mlx5hws_pattern_cache *cache,
+                                u32 pattern_id,
+                                u16 num_of_actions,
+                                __be64 *actions)
+{
+       struct mlx5hws_pattern_cache_item *cached_pattern;
+
+       cached_pattern = kzalloc(sizeof(*cached_pattern), GFP_KERNEL);
+       if (!cached_pattern)
+               return NULL;
+
+       cached_pattern->mh_data.num_of_actions = num_of_actions;
+       cached_pattern->mh_data.pattern_id = pattern_id;
+       cached_pattern->mh_data.data =
+               kmemdup(actions, num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
+       if (!cached_pattern->mh_data.data)
+               goto free_cached_obj;
+
+       list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
+       cached_pattern->refcount = 1;
+
+       return cached_pattern;
+
+free_cached_obj:
+       kfree(cached_pattern);
+       return NULL;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_find_cached_pattern_by_id(struct mlx5hws_pattern_cache *cache,
+                                     u32 ptrn_id)
+{
+       struct mlx5hws_pattern_cache_item *cached_pattern = NULL;
+
+       list_for_each_entry(cached_pattern, &cache->ptrn_list, ptrn_list_node) {
+               if (cached_pattern->mh_data.pattern_id == ptrn_id)
+                       return cached_pattern;
+       }
+
+       return NULL;
+}
+
+static void
+mlx5hws_pat_remove_pattern(struct mlx5hws_pattern_cache_item *cached_pattern)
+{
+       list_del_init(&cached_pattern->ptrn_list_node);
+
+       kfree(cached_pattern->mh_data.data);
+       kfree(cached_pattern);
+}
+
+void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx, u32 ptrn_id)
+{
+       struct mlx5hws_pattern_cache *cache = ctx->pattern_cache;
+       struct mlx5hws_pattern_cache_item *cached_pattern;
+
+       mutex_lock(&cache->lock);
+       cached_pattern = mlx5hws_pat_find_cached_pattern_by_id(cache, ptrn_id);
+       if (!cached_pattern) {
+               mlx5hws_err(ctx, "Failed to find cached pattern with provided ID\n");
+               pr_warn("HWS: pattern ID %d is not found\n", ptrn_id);
+               goto out;
+       }
+
+       if (--cached_pattern->refcount)
+               goto out;
+
+       mlx5hws_pat_remove_pattern(cached_pattern);
+       mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);
+
+out:
+       mutex_unlock(&cache->lock);
+}
+
+int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
+                           __be64 *pattern, size_t pattern_sz,
+                           u32 *pattern_id)
+{
+       u16 num_of_actions = pattern_sz / MLX5HWS_MODIFY_ACTION_SIZE;
+       struct mlx5hws_pattern_cache_item *cached_pattern;
+       u32 ptrn_id = 0;
+       int ret = 0;
+
+       mutex_lock(&ctx->pattern_cache->lock);
+
+       cached_pattern = mlx5hws_pat_get_existing_cached_pattern(ctx->pattern_cache,
+                                                                num_of_actions,
+                                                                pattern);
+       if (cached_pattern) {
+               *pattern_id = cached_pattern->mh_data.pattern_id;
+               goto out_unlock;
+       }
+
+       ret = mlx5hws_cmd_header_modify_pattern_create(ctx->mdev,
+                                                      pattern_sz,
+                                                      (u8 *)pattern,
+                                                      &ptrn_id);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to create pattern FW object\n");
+               goto out_unlock;
+       }
+
+       cached_pattern = mlx5hws_pat_add_pattern_to_cache(ctx->pattern_cache,
+                                                         ptrn_id,
+                                                         num_of_actions,
+                                                         pattern);
+       if (!cached_pattern) {
+               mlx5hws_err(ctx, "Failed to add pattern to cache\n");
+               ret = -EINVAL;
+               goto clean_pattern;
+       }
+
+       mutex_unlock(&ctx->pattern_cache->lock);
+       *pattern_id = ptrn_id;
+
+       return ret;
+
+clean_pattern:
+       mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, *pattern_id);
+out_unlock:
+       mutex_unlock(&ctx->pattern_cache->lock);
+       return ret;
+}
+
+static void
+mlx5d_arg_init_send_attr(struct mlx5hws_send_engine_post_attr *send_attr,
+                        void *comp_data,
+                        u32 arg_idx)
+{
+       send_attr->opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+       send_attr->opmod = MLX5HWS_WQE_GTA_OPMOD_MOD_ARG;
+       send_attr->len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+       send_attr->id = arg_idx;
+       send_attr->user_data = comp_data;
+}
+
+void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue,
+                              u32 arg_idx,
+                              u8 *arg_data,
+                              u16 num_of_actions)
+{
+       struct mlx5hws_send_engine_post_attr send_attr = {0};
+       struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg = NULL;
+       struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl = NULL;
+       struct mlx5hws_send_engine_post_ctrl ctrl;
+       size_t wqe_len;
+
+       mlx5d_arg_init_send_attr(&send_attr, NULL, arg_idx);
+
+       ctrl = mlx5hws_send_engine_post_start(queue);
+       mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+       memset(wqe_ctrl, 0, wqe_len);
+       mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+       mlx5hws_action_prepare_decap_l3_data(arg_data, (u8 *)wqe_arg,
+                                            num_of_actions);
+       mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+}
+
+void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
+                      void *comp_data,
+                      u32 arg_idx,
+                      u8 *arg_data,
+                      size_t data_size)
+{
+       struct mlx5hws_send_engine_post_attr send_attr = {0};
+       struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg;
+       struct mlx5hws_send_engine_post_ctrl ctrl;
+       struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+       int i, full_iter, leftover;
+       size_t wqe_len;
+
+       mlx5d_arg_init_send_attr(&send_attr, comp_data, arg_idx);
+
+       /* Each WQE can hold 64B of data, it might require multiple iteration */
+       full_iter = data_size / MLX5HWS_ARG_DATA_SIZE;
+       leftover = data_size & (MLX5HWS_ARG_DATA_SIZE - 1);
+
+       for (i = 0; i < full_iter; i++) {
+               ctrl = mlx5hws_send_engine_post_start(queue);
+               mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+               memset(wqe_ctrl, 0, wqe_len);
+               mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+               memcpy(wqe_arg, arg_data, wqe_len);
+               send_attr.id = arg_idx++;
+               mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+
+               /* Move to next argument data */
+               arg_data += MLX5HWS_ARG_DATA_SIZE;
+       }
+
+       if (leftover) {
+               ctrl = mlx5hws_send_engine_post_start(queue);
+               mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+               memset(wqe_ctrl, 0, wqe_len);
+               mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+               memcpy(wqe_arg, arg_data, leftover);
+               send_attr.id = arg_idx;
+               mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+       }
+}
+
+int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
+                                     u32 arg_idx,
+                                     u8 *arg_data,
+                                     size_t data_size)
+{
+       struct mlx5hws_send_engine *queue;
+       int ret;
+
+       mutex_lock(&ctx->ctrl_lock);
+
+       /* Get the control queue */
+       queue = &ctx->send_queue[ctx->queues - 1];
+
+       mlx5hws_arg_write(queue, arg_data, arg_idx, arg_data, data_size);
+
+       mlx5hws_send_engine_flush_queue(queue);
+
+       /* Poll for completion */
+       ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1,
+                                       MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+
+       if (ret)
+               mlx5hws_err(ctx, "Failed to drain arg queue\n");
+
+       mutex_unlock(&ctx->ctrl_lock);
+
+       return ret;
+}
+
+bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx,
+                                          u32 arg_size)
+{
+       if (arg_size < ctx->caps->log_header_modify_argument_granularity ||
+           arg_size > ctx->caps->log_header_modify_argument_max_alloc) {
+               return false;
+       }
+       return true;
+}
+
+int mlx5hws_arg_create(struct mlx5hws_context *ctx,
+                      u8 *data,
+                      size_t data_sz,
+                      u32 log_bulk_sz,
+                      bool write_data,
+                      u32 *arg_id)
+{
+       u16 single_arg_log_sz;
+       u16 multi_arg_log_sz;
+       int ret;
+       u32 id;
+
+       single_arg_log_sz = mlx5hws_arg_data_size_to_arg_log_size(data_sz);
+       multi_arg_log_sz = single_arg_log_sz + log_bulk_sz;
+
+       if (single_arg_log_sz >= MLX5HWS_ARG_CHUNK_SIZE_MAX) {
+               mlx5hws_err(ctx, "Requested single arg %u not supported\n", single_arg_log_sz);
+               return -EOPNOTSUPP;
+       }
+
+       if (!mlx5hws_arg_is_valid_arg_request_size(ctx, multi_arg_log_sz)) {
+               mlx5hws_err(ctx, "Argument log size %d not supported by FW\n", multi_arg_log_sz);
+               return -EOPNOTSUPP;
+       }
+
+       /* Alloc bulk of args */
+       ret = mlx5hws_cmd_arg_create(ctx->mdev, multi_arg_log_sz, ctx->pd_num, &id);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed allocating arg in order: %d\n", multi_arg_log_sz);
+               return ret;
+       }
+
+       if (write_data) {
+               ret = mlx5hws_arg_write_inline_arg_data(ctx, id,
+                                                       data, data_sz);
+               if (ret) {
+                       mlx5hws_err(ctx, "Failed writing arg data\n");
+                       mlx5hws_cmd_arg_destroy(ctx->mdev, id);
+                       return ret;
+               }
+       }
+
+       *arg_id = id;
+       return ret;
+}
+
+void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id)
+{
+       mlx5hws_cmd_arg_destroy(ctx->mdev, arg_id);
+}
+
+int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx,
+                                        __be64 *data,
+                                        u8 num_of_actions,
+                                        u32 log_bulk_sz,
+                                        bool write_data,
+                                        u32 *arg_id)
+{
+       size_t data_sz = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE;
+       int ret;
+
+       ret = mlx5hws_arg_create(ctx,
+                                (u8 *)data,
+                                data_sz,
+                                log_bulk_sz,
+                                write_data,
+                                arg_id);
+       if (ret)
+               mlx5hws_err(ctx, "Failed creating modify header arg\n");
+
+       return ret;
+}
+
+static int
+hws_action_modify_check_field_limitation(u8 action_type, __be64 *pattern)
+{
+       /* Need to check field limitation here, but for now - return OK */
+       return 0;
+}
+
+#define INVALID_FIELD 0xffff
+
+static void
+hws_action_modify_get_target_fields(u8 action_type, __be64 *pattern,
+                                   u16 *src_field, u16 *dst_field)
+{
+       switch (action_type) {
+       case MLX5_ACTION_TYPE_SET:
+       case MLX5_ACTION_TYPE_ADD:
+               *src_field = MLX5_GET(set_action_in, pattern, field);
+               *dst_field = INVALID_FIELD;
+               break;
+       case MLX5_ACTION_TYPE_COPY:
+               *src_field = MLX5_GET(copy_action_in, pattern, src_field);
+               *dst_field = MLX5_GET(copy_action_in, pattern, dst_field);
+               break;
+       default:
+               pr_warn("HWS: invalid modify header action type %d\n", action_type);
+       }
+}
+
+bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz)
+{
+       size_t i;
+
+       for (i = 0; i < sz / MLX5HWS_MODIFY_ACTION_SIZE; i++) {
+               u8 action_type =
+                       MLX5_GET(set_action_in, &pattern[i], action_type);
+               if (action_type >= MLX5_MODIFICATION_TYPE_MAX) {
+                       mlx5hws_err(ctx, "Unsupported action id %d\n", action_type);
+                       return false;
+               }
+               if (hws_action_modify_check_field_limitation(action_type, &pattern[i])) {
+                       mlx5hws_err(ctx, "Unsupported action number %zu\n", i);
+                       return false;
+               }
+       }
+
+       return true;
+}
+
+void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions,
+                          size_t max_actions, size_t *new_size,
+                          u32 *nope_location, __be64 *new_pat)
+{
+       u16 prev_src_field = 0, prev_dst_field = 0;
+       u16 src_field, dst_field;
+       u8 action_type;
+       size_t i, j;
+
+       *new_size = num_actions;
+       *nope_location = 0;
+
+       if (num_actions == 1)
+               return;
+
+       for (i = 0, j = 0; i < num_actions; i++, j++) {
+               action_type = MLX5_GET(set_action_in, &pattern[i], action_type);
+
+               hws_action_modify_get_target_fields(action_type, &pattern[i],
+                                                   &src_field, &dst_field);
+               if (i % 2) {
+                       if (action_type == MLX5_ACTION_TYPE_COPY &&
+                           (prev_src_field == src_field ||
+                            prev_dst_field == dst_field)) {
+                               /* need Nope */
+                               *new_size += 1;
+                               *nope_location |= BIT(i);
+                               memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
+                               MLX5_SET(set_action_in, &new_pat[j],
+                                        action_type,
+                                        MLX5_MODIFICATION_TYPE_NOP);
+                               j++;
+                       } else if (prev_src_field == src_field) {
+                               /* need Nope*/
+                               *new_size += 1;
+                               *nope_location |= BIT(i);
+                               MLX5_SET(set_action_in, &new_pat[j],
+                                        action_type,
+                                        MLX5_MODIFICATION_TYPE_NOP);
+                               j++;
+                       }
+               }
+               memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);
+               /* check if no more space */
+               if (j > max_actions) {
+                       *new_size = num_actions;
+                       *nope_location = 0;
+                       return;
+               }
+
+               prev_src_field = src_field;
+               prev_dst_field = dst_field;
+       }
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
new file mode 100644 (file)
index 0000000..27ca933
--- /dev/null
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_PAT_ARG_H_
+#define MLX5HWS_PAT_ARG_H_
+
+/* Modify-header arg pool */
+enum mlx5hws_arg_chunk_size {
+       MLX5HWS_ARG_CHUNK_SIZE_1,
+       /* Keep MIN updated when changing */
+       MLX5HWS_ARG_CHUNK_SIZE_MIN = MLX5HWS_ARG_CHUNK_SIZE_1,
+       MLX5HWS_ARG_CHUNK_SIZE_2,
+       MLX5HWS_ARG_CHUNK_SIZE_3,
+       MLX5HWS_ARG_CHUNK_SIZE_4,
+       MLX5HWS_ARG_CHUNK_SIZE_MAX,
+};
+
+enum {
+       /* Size in bytes of a single modify-header action entry */
+       MLX5HWS_MODIFY_ACTION_SIZE = 8,
+       /* Size in bytes of an argument data block */
+       MLX5HWS_ARG_DATA_SIZE = 64,
+};
+
+/* Cache of modify-header patterns, shared across rules to avoid
+ * re-creating identical patterns in FW.
+ */
+struct mlx5hws_pattern_cache {
+       struct mutex lock; /* Protect pattern list */
+       struct list_head ptrn_list;
+};
+
+/* A single cached pattern: the FW pattern object id, a copy of the
+ * action data used for lookup, and a refcount of rules sharing it.
+ */
+struct mlx5hws_pattern_cache_item {
+       struct {
+               u32 pattern_id;
+               u8 *data;
+               u16 num_of_actions;
+       } mh_data;
+       u32 refcount;
+       struct list_head ptrn_list_node;
+};
+
+/* Argument object size/allocation helpers */
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_get_arg_log_size(u16 num_of_actions);
+
+u32 mlx5hws_arg_get_arg_size(u16 num_of_actions);
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_data_size_to_arg_log_size(u16 data_size);
+
+u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size);
+
+/* Pattern cache lifetime */
+int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache);
+
+void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache);
+
+bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz);
+
+/* Argument object creation/destruction; write_data controls whether the
+ * initial data is written to the new object.
+ */
+int mlx5hws_arg_create(struct mlx5hws_context *ctx,
+                      u8 *data,
+                      size_t data_sz,
+                      u32 log_bulk_sz,
+                      bool write_data,
+                      u32 *arg_id);
+
+void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id);
+
+int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx,
+                                        __be64 *data,
+                                        u8 num_of_actions,
+                                        u32 log_bulk_sz,
+                                        bool write_data,
+                                        u32 *modify_hdr_arg_id);
+
+/* Get (or create and cache) / put a FW pattern object */
+int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
+                           __be64 *pattern,
+                           size_t pattern_sz,
+                           u32 *ptrn_id);
+
+void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx,
+                            u32 ptrn_id);
+
+bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx,
+                                          u32 arg_size);
+
+bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions);
+
+/* Asynchronous write of argument data through a send engine queue */
+void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
+                      void *comp_data,
+                      u32 arg_idx,
+                      u8 *arg_data,
+                      size_t data_size);
+
+void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue,
+                              u32 arg_idx,
+                              u8 *arg_data,
+                              u16 num_of_actions);
+
+int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
+                                     u32 arg_idx,
+                                     u8 *arg_data,
+                                     size_t data_size);
+
+/* Compute where NOP actions must be inserted into @pattern (see the
+ * implementation for the pairing constraints) and emit the padded
+ * pattern into @new_pat.
+ */
+void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions, size_t max_actions,
+                          size_t *new_size, u32 *nope_location, __be64 *new_pat);
+#endif /* MLX5HWS_PAT_ARG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
new file mode 100644 (file)
index 0000000..fed2d91
--- /dev/null
@@ -0,0 +1,640 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+#include "buddy.h"
+
+/* Destroy the FW object range backing @resource (STE or STC) and free
+ * the host-side descriptor. Unknown pool types only free the descriptor.
+ */
+static void hws_pool_free_one_resource(struct mlx5hws_pool_resource *resource)
+{
+       switch (resource->pool->type) {
+       case MLX5HWS_POOL_TYPE_STE:
+               mlx5hws_cmd_ste_destroy(resource->pool->ctx->mdev, resource->base_id);
+               break;
+       case MLX5HWS_POOL_TYPE_STC:
+               mlx5hws_cmd_stc_destroy(resource->pool->ctx->mdev, resource->base_id);
+               break;
+       default:
+               break;
+       }
+
+       kfree(resource);
+}
+
+/* Free the resource at @resource_idx and clear its slot; for FDB tables
+ * the mirrored counterpart is freed as well.
+ */
+static void hws_pool_resource_free(struct mlx5hws_pool *pool,
+                                  int resource_idx)
+{
+       hws_pool_free_one_resource(pool->resource[resource_idx]);
+       pool->resource[resource_idx] = NULL;
+
+       if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
+               hws_pool_free_one_resource(pool->mirror_resource[resource_idx]);
+               pool->mirror_resource[resource_idx] = NULL;
+       }
+}
+
+/* Allocate one resource descriptor and create the FW object range
+ * (2^log_range entries) backing it. Returns NULL on failure.
+ * NOTE(review): ste_attr/stc_attr are not zero-initialized; this assumes
+ * the cmd layer reads only the two fields set below — confirm.
+ */
+static struct mlx5hws_pool_resource *
+hws_pool_create_one_resource(struct mlx5hws_pool *pool, u32 log_range,
+                            u32 fw_ft_type)
+{
+       struct mlx5hws_cmd_ste_create_attr ste_attr;
+       struct mlx5hws_cmd_stc_create_attr stc_attr;
+       struct mlx5hws_pool_resource *resource;
+       u32 obj_id = 0;
+       int ret;
+
+       resource = kzalloc(sizeof(*resource), GFP_KERNEL);
+       if (!resource)
+               return NULL;
+
+       switch (pool->type) {
+       case MLX5HWS_POOL_TYPE_STE:
+               ste_attr.log_obj_range = log_range;
+               ste_attr.table_type = fw_ft_type;
+               ret = mlx5hws_cmd_ste_create(pool->ctx->mdev, &ste_attr, &obj_id);
+               break;
+       case MLX5HWS_POOL_TYPE_STC:
+               stc_attr.log_obj_range = log_range;
+               stc_attr.table_type = fw_ft_type;
+               ret = mlx5hws_cmd_stc_create(pool->ctx->mdev, &stc_attr, &obj_id);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+       if (ret) {
+               mlx5hws_err(pool->ctx, "Failed to allocate resource objects\n");
+               goto free_resource;
+       }
+
+       resource->pool = pool;
+       resource->range = 1 << log_range;
+       resource->base_id = obj_id;
+
+       return resource;
+
+free_resource:
+       kfree(resource);
+       return NULL;
+}
+
+/* Create the resource for slot @idx and, for FDB tables, its mirror.
+ * The optimized side (orig or mirror) is created with log range 0,
+ * i.e. a single entry, since it will not actually be used.
+ */
+static int
+hws_pool_resource_alloc(struct mlx5hws_pool *pool, u32 log_range, int idx)
+{
+       struct mlx5hws_pool_resource *resource;
+       u32 fw_ft_type, opt_log_range;
+
+       fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, false);
+       opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_range;
+       resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
+       if (!resource) {
+               mlx5hws_err(pool->ctx, "Failed allocating resource\n");
+               return -EINVAL;
+       }
+
+       pool->resource[idx] = resource;
+
+       if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
+               struct mlx5hws_pool_resource *mirror_resource;
+
+               fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, true);
+               opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_range;
+               mirror_resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
+               if (!mirror_resource) {
+                       mlx5hws_err(pool->ctx, "Failed allocating mirrored resource\n");
+                       hws_pool_free_one_resource(resource);
+                       pool->resource[idx] = NULL;
+                       return -EINVAL;
+               }
+               pool->mirror_resource[idx] = mirror_resource;
+       }
+
+       return 0;
+}
+
+/* Allocate a bitmap of 2^log_range bits with every bit set, i.e. all
+ * slots initially marked free. Returns NULL on allocation failure.
+ */
+static unsigned long *hws_pool_create_and_init_bitmap(u32 log_range)
+{
+       unsigned long *bmp = bitmap_zalloc(1 << log_range, GFP_KERNEL);
+
+       if (bmp)
+               bitmap_fill(bmp, 1 << log_range);
+
+       return bmp;
+}
+
+/* Return @chunk's segment to the buddy allocator it was taken from.
+ * A missing buddy indicates internal corruption and is only logged.
+ */
+static void hws_pool_buddy_db_put_chunk(struct mlx5hws_pool *pool,
+                                       struct mlx5hws_pool_chunk *chunk)
+{
+       struct mlx5hws_buddy_mem *buddy;
+
+       buddy = pool->db.buddy_manager->buddies[chunk->resource_idx];
+       if (!buddy) {
+               mlx5hws_err(pool->ctx, "No such buddy (%d)\n", chunk->resource_idx);
+               return;
+       }
+
+       mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order);
+}
+
+/* Return the buddy allocator at @idx, creating it (and the FW resource
+ * backing it) on first use; *is_new_buddy is set to true when a new
+ * buddy was created. Returns NULL on failure.
+ */
+static struct mlx5hws_buddy_mem *
+hws_pool_buddy_get_next_buddy(struct mlx5hws_pool *pool, int idx,
+                             u32 order, bool *is_new_buddy)
+{
+       /* Fix: this local was erroneously declared 'static', making the
+        * function needlessly stateful and non-reentrant.
+        */
+       struct mlx5hws_buddy_mem *buddy;
+       u32 new_buddy_size;
+
+       buddy = pool->db.buddy_manager->buddies[idx];
+       if (buddy)
+               return buddy;
+
+       /* Grow by at least the pool's configured allocation size */
+       new_buddy_size = max(pool->alloc_log_sz, order);
+       *is_new_buddy = true;
+       buddy = mlx5hws_buddy_create(new_buddy_size);
+       if (!buddy) {
+               mlx5hws_err(pool->ctx, "Failed to create buddy order: %d index: %d\n",
+                           new_buddy_size, idx);
+               return NULL;
+       }
+
+       if (hws_pool_resource_alloc(pool, new_buddy_size, idx) != 0) {
+               mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
+                           pool->type, new_buddy_size, idx);
+               mlx5hws_buddy_cleanup(buddy);
+               return NULL;
+       }
+
+       pool->db.buddy_manager->buddies[idx] = buddy;
+
+       return buddy;
+}
+
+/* Find a free segment of size 2^order: scan the buddy array, creating a
+ * new buddy (and FW resource) at the first empty index when existing
+ * ones are full. On success fills *buddy_idx and *seg and returns 0.
+ * NOTE(review): if all MLX5HWS_POOL_RESOURCE_ARR_SZ buddies already
+ * exist and are all full, the outer while loop never terminates —
+ * confirm this state is unreachable in practice.
+ */
+static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool,
+                                       int order,
+                                       u32 *buddy_idx,
+                                       int *seg)
+{
+       struct mlx5hws_buddy_mem *buddy;
+       bool new_mem = false;
+       int ret = 0;
+       int i;
+
+       *seg = -1;
+
+       /* Find the next free place from the buddy array */
+       while (*seg == -1) {
+               for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+                       buddy = hws_pool_buddy_get_next_buddy(pool, i,
+                                                             order,
+                                                             &new_mem);
+                       if (!buddy) {
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+
+                       *seg = mlx5hws_buddy_alloc_mem(buddy, order);
+                       if (*seg != -1)
+                               goto found;
+
+                       if (pool->flags & MLX5HWS_POOL_FLAGS_ONE_RESOURCE) {
+                               mlx5hws_err(pool->ctx,
+                                           "Fail to allocate seg for one resource pool\n");
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+
+                       if (new_mem) {
+                               /* We have new memory pool, should be place for us */
+                               mlx5hws_err(pool->ctx,
+                                           "No memory for order: %d with buddy no: %d\n",
+                                           order, i);
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+               }
+       }
+
+found:
+       *buddy_idx = i;
+out:
+       return ret;
+}
+
+/* Buddy-db get_chunk callback: allocate a segment of 2^chunk->order
+ * entries, filling chunk->resource_idx and chunk->offset.
+ */
+static int hws_pool_buddy_db_get_chunk(struct mlx5hws_pool *pool,
+                                      struct mlx5hws_pool_chunk *chunk)
+{
+       int ret = 0;
+
+       /* Go over the buddies and find next free slot */
+       ret = hws_pool_buddy_get_mem_chunk(pool, chunk->order,
+                                          &chunk->resource_idx,
+                                          &chunk->offset);
+       if (ret)
+               mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
+                           chunk->order);
+
+       return ret;
+}
+
+/* Buddy-db uninit callback: tear down every buddy allocator and free
+ * the manager. FW resources are released separately by the pool.
+ */
+static void hws_pool_buddy_db_uninit(struct mlx5hws_pool *pool)
+{
+       struct mlx5hws_buddy_mem *buddy;
+       int i;
+
+       for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+               buddy = pool->db.buddy_manager->buddies[i];
+               if (buddy) {
+                       mlx5hws_buddy_cleanup(buddy);
+                       kfree(buddy);
+                       pool->db.buddy_manager->buddies[i] = NULL;
+               }
+       }
+
+       kfree(pool->db.buddy_manager);
+}
+
+/* Initialize the buddy-based db. When ALLOC_MEM_ON_CREATE is set, the
+ * first buddy (and its FW resource) is created eagerly so the pool is
+ * usable without a first-allocation hit.
+ */
+static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool, u32 log_range)
+{
+       pool->db.buddy_manager = kzalloc(sizeof(*pool->db.buddy_manager), GFP_KERNEL);
+       if (!pool->db.buddy_manager)
+               return -ENOMEM;
+
+       if (pool->flags & MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE) {
+               bool new_buddy;
+
+               if (!hws_pool_buddy_get_next_buddy(pool, 0, log_range, &new_buddy)) {
+                       mlx5hws_err(pool->ctx,
+                                   "Failed allocating memory on create log_sz: %d\n", log_range);
+                       kfree(pool->db.buddy_manager);
+                       return -ENOMEM;
+               }
+       }
+
+       pool->p_db_uninit = &hws_pool_buddy_db_uninit;
+       pool->p_get_chunk = &hws_pool_buddy_db_get_chunk;
+       pool->p_put_chunk = &hws_pool_buddy_db_put_chunk;
+
+       return 0;
+}
+
+/* Thin wrapper around hws_pool_resource_alloc() that logs failures. */
+static int hws_pool_create_resource_on_index(struct mlx5hws_pool *pool,
+                                            u32 alloc_size, int idx)
+{
+       int ret = hws_pool_resource_alloc(pool, alloc_size, idx);
+
+       if (ret) {
+               mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
+                           pool->type, alloc_size, idx);
+               return ret;
+       }
+
+       return 0;
+}
+
+/* Create the element descriptor at slot @idx together with its FW
+ * resource. For fixed-size (non per-chunk) pools a free-slot bitmap of
+ * 2^(alloc_log_sz - order) entries is also created. Returns NULL on
+ * failure.
+ */
+static struct mlx5hws_pool_elements *
+hws_pool_element_create_new_elem(struct mlx5hws_pool *pool, u32 order, int idx)
+{
+       struct mlx5hws_pool_elements *elem;
+       u32 alloc_size;
+
+       alloc_size = pool->alloc_log_sz;
+
+       elem = kzalloc(sizeof(*elem), GFP_KERNEL);
+       if (!elem)
+               return NULL;
+
+       /* Sharing the same resource, also means that all the elements are with size 1 */
+       if ((pool->flags & MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS) &&
+           !(pool->flags & MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK)) {
+                /* Currently all chunks are of size 1 */
+               elem->bitmap = hws_pool_create_and_init_bitmap(alloc_size - order);
+               if (!elem->bitmap) {
+                       mlx5hws_err(pool->ctx,
+                                   "Failed to create bitmap type: %d: size %d index: %d\n",
+                                   pool->type, alloc_size, idx);
+                       goto free_elem;
+               }
+
+               elem->log_size = alloc_size - order;
+       }
+
+       if (hws_pool_create_resource_on_index(pool, alloc_size, idx)) {
+               mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
+                           pool->type, alloc_size, idx);
+               goto free_db;
+       }
+
+       pool->db.element_manager->elements[idx] = elem;
+
+       return elem;
+
+free_db:
+       /* bitmap may be NULL here (flags branch not taken); bitmap_free(NULL) is a no-op */
+       bitmap_free(elem->bitmap);
+free_elem:
+       kfree(elem);
+       return NULL;
+}
+
+/* Claim the first free slot in @elem's bitmap. Returns 0 and fills
+ * *seg, or -ENOMEM (marking the element full) when no bit is set.
+ */
+static int hws_pool_element_find_seg(struct mlx5hws_pool_elements *elem, int *seg)
+{
+       unsigned int segment, size;
+
+       size = 1 << elem->log_size;
+
+       segment = find_first_bit(elem->bitmap, size);
+       if (segment >= size) {
+               elem->is_full = true;
+               return -ENOMEM;
+       }
+
+       /* Clearing the bit marks the slot as taken */
+       bitmap_clear(elem->bitmap, segment, 1);
+       *seg = segment;
+       return 0;
+}
+
+/* One-size db allocation: a single element at index 0 backs all chunks;
+ * create it lazily on first use and take one slot from its bitmap.
+ */
+static int
+hws_pool_onesize_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
+                                      u32 *idx, int *seg)
+{
+       struct mlx5hws_pool_elements *elem;
+
+       elem = pool->db.element_manager->elements[0];
+       if (!elem)
+               elem = hws_pool_element_create_new_elem(pool, order, 0);
+       if (!elem)
+               goto err_no_elem;
+
+       if (hws_pool_element_find_seg(elem, seg) != 0) {
+               mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
+               return -ENOMEM;
+       }
+
+       *idx = 0;
+       elem->num_of_elements++;
+       return 0;
+
+err_no_elem:
+       mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
+       return -ENOMEM;
+}
+
+/* General db allocation: each chunk gets its own resource, so find the
+ * first empty resource slot and create a resource of 2^order entries
+ * there. The chunk occupies the whole resource (seg 0).
+ */
+static int
+hws_pool_general_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
+                                      u32 *idx, int *seg)
+{
+       int ret, i;
+
+       for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+               if (!pool->resource[i]) {
+                       ret = hws_pool_create_resource_on_index(pool, order, i);
+                       if (ret)
+                               goto err_no_res;
+                       *idx = i;
+                       *seg = 0; /* One memory slot in that element */
+                       return 0;
+               }
+       }
+
+       mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
+       return -ENOMEM;
+
+err_no_res:
+       mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
+       return -ENOMEM;
+}
+
+/* General-db get_chunk callback: allocate a dedicated resource for the
+ * chunk, filling chunk->resource_idx and chunk->offset.
+ */
+static int hws_pool_general_element_db_get_chunk(struct mlx5hws_pool *pool,
+                                                struct mlx5hws_pool_chunk *chunk)
+{
+       int ret;
+
+       /* Go over all memory elements and find/allocate free slot */
+       ret = hws_pool_general_element_get_mem_chunk(pool, chunk->order,
+                                                    &chunk->resource_idx,
+                                                    &chunk->offset);
+       if (ret)
+               mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
+                           chunk->order);
+
+       return ret;
+}
+
+/* General-db put_chunk callback: each chunk owns its whole resource, so
+ * releasing the chunk frees that resource when the pool's flags allow.
+ * Fix: previously a NULL resource was warned about but then still
+ * passed to hws_pool_resource_free(), which dereferences it — bail out
+ * after the warning instead.
+ */
+static void hws_pool_general_element_db_put_chunk(struct mlx5hws_pool *pool,
+                                                 struct mlx5hws_pool_chunk *chunk)
+{
+       if (unlikely(!pool->resource[chunk->resource_idx])) {
+               pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+               return;
+       }
+
+       if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE)
+               hws_pool_resource_free(pool, chunk->resource_idx);
+}
+
+/* General-db uninit callback: the general db keeps no state of its own,
+ * so there is nothing to tear down.
+ */
+static void hws_pool_general_element_db_uninit(struct mlx5hws_pool *pool)
+{
+       (void)pool;
+}
+
+/* This memory management works as follows:
+ * - At start, no memory is allocated at all.
+ * - When a new request for a chunk arrives:
+ *     allocate a resource and hand it out.
+ * - When that chunk is freed:
+ *     the resource is freed as well.
+ */
+/* Install the general-db callbacks; no manager state is allocated. */
+static int hws_pool_general_element_db_init(struct mlx5hws_pool *pool)
+{
+       pool->p_db_uninit = &hws_pool_general_element_db_uninit;
+       pool->p_get_chunk = &hws_pool_general_element_db_get_chunk;
+       pool->p_put_chunk = &hws_pool_general_element_db_put_chunk;
+
+       return 0;
+}
+
+/* Free an element whose refcount dropped to zero: release the FW
+ * resource, free the descriptor and clear its slot.
+ * NOTE(review): the resource is freed even when the invalid-resource
+ * warning fires; hws_pool_free_one_resource() would then dereference a
+ * NULL resource — confirm the warn path cannot happen here.
+ */
+static void hws_onesize_element_db_destroy_element(struct mlx5hws_pool *pool,
+                                                  struct mlx5hws_pool_elements *elem,
+                                                  struct mlx5hws_pool_chunk *chunk)
+{
+       if (unlikely(!pool->resource[chunk->resource_idx]))
+               pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+
+       hws_pool_resource_free(pool, chunk->resource_idx);
+       kfree(elem);
+       pool->db.element_manager->elements[chunk->resource_idx] = NULL;
+}
+
+/* One-size db put_chunk callback: return the chunk's slot to the single
+ * element's bitmap; when the element becomes empty and the pool releases
+ * free resources, destroy the element entirely.
+ */
+static void hws_onesize_element_db_put_chunk(struct mlx5hws_pool *pool,
+                                            struct mlx5hws_pool_chunk *chunk)
+{
+       struct mlx5hws_pool_elements *elem;
+
+       /* One-size db only ever uses index 0, so anything else is invalid */
+       if (unlikely(chunk->resource_idx))
+               pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+
+       elem = pool->db.element_manager->elements[chunk->resource_idx];
+       if (!elem) {
+               mlx5hws_err(pool->ctx, "No such element (%d)\n", chunk->resource_idx);
+               return;
+       }
+
+       /* Setting the bit marks the slot as free again */
+       bitmap_set(elem->bitmap, chunk->offset, 1);
+       elem->is_full = false;
+       elem->num_of_elements--;
+
+       if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE &&
+           !elem->num_of_elements)
+               hws_onesize_element_db_destroy_element(pool, elem, chunk);
+}
+
+/* One-size db get_chunk callback: take one slot from the single shared
+ * element, filling chunk->resource_idx and chunk->offset.
+ */
+static int hws_onesize_element_db_get_chunk(struct mlx5hws_pool *pool,
+                                           struct mlx5hws_pool_chunk *chunk)
+{
+       int ret = 0;
+
+       /* Go over all memory elements and find/allocate free slot */
+       ret = hws_pool_onesize_element_get_mem_chunk(pool, chunk->order,
+                                                    &chunk->resource_idx,
+                                                    &chunk->offset);
+       if (ret)
+               mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
+                           chunk->order);
+
+       return ret;
+}
+
+/* One-size db uninit callback: free every remaining element (bitmap and
+ * descriptor) and then the manager itself.
+ */
+static void hws_onesize_element_db_uninit(struct mlx5hws_pool *pool)
+{
+       struct mlx5hws_pool_elements *elem;
+       int i;
+
+       for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+               elem = pool->db.element_manager->elements[i];
+               if (elem) {
+                       bitmap_free(elem->bitmap);
+                       kfree(elem);
+                       pool->db.element_manager->elements[i] = NULL;
+               }
+       }
+       kfree(pool->db.element_manager);
+}
+
+/* This memory management works as follows:
+ * - At start, no memory is allocated at all.
+ * - On the first request for a chunk:
+ *   allocate the first and only slot of memory/resource;
+ *   once it is exhausted, return an error.
+ */
+/* Initialize the one-size db: allocate the element manager and install
+ * the one-size callbacks. The element itself is created lazily on the
+ * first chunk request.
+ */
+static int hws_pool_onesize_element_db_init(struct mlx5hws_pool *pool)
+{
+       pool->db.element_manager = kzalloc(sizeof(*pool->db.element_manager), GFP_KERNEL);
+       if (!pool->db.element_manager)
+               return -ENOMEM;
+
+       pool->p_db_uninit = &hws_onesize_element_db_uninit;
+       pool->p_get_chunk = &hws_onesize_element_db_get_chunk;
+       pool->p_put_chunk = &hws_onesize_element_db_put_chunk;
+
+       return 0;
+}
+
+/* Dispatch db initialization by type; anything that is not general or
+ * one-size falls through to the buddy db.
+ */
+static int hws_pool_db_init(struct mlx5hws_pool *pool,
+                           enum mlx5hws_db_type db_type)
+{
+       int ret;
+
+       if (db_type == MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE)
+               ret = hws_pool_general_element_db_init(pool);
+       else if (db_type == MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE)
+               ret = hws_pool_onesize_element_db_init(pool);
+       else
+               ret = hws_pool_buddy_db_init(pool, pool->alloc_log_sz);
+
+       /* The message mentions "general db" but covers all db types */
+       if (ret) {
+               mlx5hws_err(pool->ctx, "Failed to init general db : %d (ret: %d)\n", db_type, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+/* Tear down the pool's db via its type-specific callback.
+ * ("unint" (sic) — kept as-is to match the existing caller.)
+ */
+static void hws_pool_db_unint(struct mlx5hws_pool *pool)
+{
+       pool->p_db_uninit(pool);
+}
+
+/* Allocate a chunk of 2^chunk->order entries from the pool, serialized
+ * under the pool lock. Returns 0 on success or a negative errno.
+ */
+int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
+                            struct mlx5hws_pool_chunk *chunk)
+{
+       int ret;
+
+       mutex_lock(&pool->lock);
+       ret = pool->p_get_chunk(pool, chunk);
+       mutex_unlock(&pool->lock);
+
+       return ret;
+}
+
+/* Return a previously allocated chunk to the pool, serialized under the
+ * pool lock.
+ */
+void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
+                            struct mlx5hws_pool_chunk *chunk)
+{
+       mutex_lock(&pool->lock);
+       pool->p_put_chunk(pool, chunk);
+       mutex_unlock(&pool->lock);
+}
+
+/* Create a pool of STE/STC ranges. The db type is derived from the
+ * requested flags: per-chunk released resources use the general db, a
+ * single fixed-size resource uses the one-size db, everything else is
+ * buddy-managed. Returns NULL on failure.
+ * Fix: alloc_log_sz was assigned twice from pool_attr; the redundant
+ * second assignment is removed.
+ */
+struct mlx5hws_pool *
+mlx5hws_pool_create(struct mlx5hws_context *ctx, struct mlx5hws_pool_attr *pool_attr)
+{
+       enum mlx5hws_db_type res_db_type;
+       struct mlx5hws_pool *pool;
+
+       pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+       if (!pool)
+               return NULL;
+
+       pool->ctx = ctx;
+       pool->type = pool_attr->pool_type;
+       pool->alloc_log_sz = pool_attr->alloc_log_sz;
+       pool->flags = pool_attr->flags;
+       pool->tbl_type = pool_attr->table_type;
+       pool->opt_type = pool_attr->opt_type;
+
+       /* Support general db */
+       if (pool->flags == (MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
+                           MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK))
+               res_db_type = MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE;
+       else if (pool->flags == (MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+                                MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS))
+               res_db_type = MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE;
+       else
+               res_db_type = MLX5HWS_POOL_DB_TYPE_BUDDY;
+
+       if (hws_pool_db_init(pool, res_db_type))
+               goto free_pool;
+
+       mutex_init(&pool->lock);
+
+       return pool;
+
+free_pool:
+       kfree(pool);
+       return NULL;
+}
+
+/* Destroy a pool: free all remaining resources, tear down the db and
+ * free the pool itself. Always returns 0.
+ */
+int mlx5hws_pool_destroy(struct mlx5hws_pool *pool)
+{
+       int i;
+
+       mutex_destroy(&pool->lock);
+
+       for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++)
+               if (pool->resource[i])
+                       hws_pool_resource_free(pool, i);
+
+       hws_pool_db_unint(pool);
+
+       kfree(pool);
+       return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
new file mode 100644 (file)
index 0000000..621298b
--- /dev/null
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_POOL_H_
+#define MLX5HWS_POOL_H_
+
+/* Default log size of the STC pool */
+#define MLX5HWS_POOL_STC_LOG_SZ 15
+
+/* Max number of backing resources/elements per pool */
+#define MLX5HWS_POOL_RESOURCE_ARR_SZ 100
+
+enum mlx5hws_pool_type {
+       MLX5HWS_POOL_TYPE_STE,
+       MLX5HWS_POOL_TYPE_STC,
+};
+
+/* An allocation handed out by a pool: which resource it lives in and
+ * where inside it.
+ */
+struct mlx5hws_pool_chunk {
+       u32 resource_idx;
+       /* Internal offset, relative to base index */
+       int offset;
+       int order;
+};
+
+/* One FW object range (STE/STC) backing part of a pool */
+struct mlx5hws_pool_resource {
+       struct mlx5hws_pool *pool;
+       u32 base_id;
+       u32 range;
+};
+
+enum mlx5hws_pool_flags {
+       /* Only a one resource in that pool */
+       MLX5HWS_POOL_FLAGS_ONE_RESOURCE = 1 << 0,
+       MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE = 1 << 1,
+       /* No sharing resources between chunks */
+       MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK = 1 << 2,
+       /* All objects are in the same size */
+       MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS = 1 << 3,
+       /* Managed by buddy allocator */
+       MLX5HWS_POOL_FLAGS_BUDDY_MANAGED = 1 << 4,
+       /* Allocate pool_type memory on pool creation */
+       MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE = 1 << 5,
+
+       /* These values should be used by the caller */
+       MLX5HWS_POOL_FLAGS_FOR_STC_POOL =
+               MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+               MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS,
+       MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL =
+               MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
+               MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK,
+       MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL =
+               MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+               MLX5HWS_POOL_FLAGS_BUDDY_MANAGED |
+               MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE,
+};
+
+/* Which side (orig/mirror) of an FDB pool is shrunk to a single entry */
+enum mlx5hws_pool_optimize {
+       MLX5HWS_POOL_OPTIMIZE_NONE = 0x0,
+       MLX5HWS_POOL_OPTIMIZE_ORIG = 0x1,
+       MLX5HWS_POOL_OPTIMIZE_MIRROR = 0x2,
+};
+
+struct mlx5hws_pool_attr {
+       enum mlx5hws_pool_type pool_type;
+       enum mlx5hws_table_type table_type;
+       enum mlx5hws_pool_flags flags;
+       enum mlx5hws_pool_optimize opt_type;
+       /* Allocation size once memory is depleted */
+       size_t alloc_log_sz;
+};
+
+enum mlx5hws_db_type {
+       /* Uses for allocating chunk of big memory, each element has its own resource in the FW*/
+       MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE,
+       /* One resource only, all the elements are with same one size */
+       MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE,
+       /* Many resources, the memory allocated with buddy mechanism */
+       MLX5HWS_POOL_DB_TYPE_BUDDY,
+};
+
+struct mlx5hws_buddy_manager {
+       struct mlx5hws_buddy_mem *buddies[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+};
+
+/* State for one fixed-size element: a free-slot bitmap plus counters */
+struct mlx5hws_pool_elements {
+       u32 num_of_elements;
+       unsigned long *bitmap;
+       u32 log_size;
+       bool is_full;
+};
+
+struct mlx5hws_element_manager {
+       struct mlx5hws_pool_elements *elements[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+};
+
+struct mlx5hws_pool_db {
+       enum mlx5hws_db_type type;
+       union {
+               struct mlx5hws_element_manager *element_manager;
+               struct mlx5hws_buddy_manager *buddy_manager;
+       };
+};
+
+/* Per-db-type callbacks installed at pool creation */
+typedef int (*mlx5hws_pool_db_get_chunk)(struct mlx5hws_pool *pool,
+                                       struct mlx5hws_pool_chunk *chunk);
+typedef void (*mlx5hws_pool_db_put_chunk)(struct mlx5hws_pool *pool,
+                                        struct mlx5hws_pool_chunk *chunk);
+typedef void (*mlx5hws_pool_unint_db)(struct mlx5hws_pool *pool);
+
+struct mlx5hws_pool {
+       struct mlx5hws_context *ctx;
+       enum mlx5hws_pool_type type;
+       enum mlx5hws_pool_flags flags;
+       struct mutex lock; /* protect the pool */
+       size_t alloc_log_sz;
+       enum mlx5hws_table_type tbl_type;
+       enum mlx5hws_pool_optimize opt_type;
+       struct mlx5hws_pool_resource *resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+       struct mlx5hws_pool_resource *mirror_resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+       /* DB */
+       struct mlx5hws_pool_db db;
+       /* Functions */
+       mlx5hws_pool_unint_db p_db_uninit;
+       mlx5hws_pool_db_get_chunk p_get_chunk;
+       mlx5hws_pool_db_put_chunk p_put_chunk;
+};
+
+struct mlx5hws_pool *
+mlx5hws_pool_create(struct mlx5hws_context *ctx,
+                   struct mlx5hws_pool_attr *pool_attr);
+
+int mlx5hws_pool_destroy(struct mlx5hws_pool *pool);
+
+int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
+                            struct mlx5hws_pool_chunk *chunk);
+
+void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
+                            struct mlx5hws_pool_chunk *chunk);
+
+/* FW object id of the chunk: resource base id plus chunk offset applies */
+static inline u32
+mlx5hws_pool_chunk_get_base_id(struct mlx5hws_pool *pool,
+                              struct mlx5hws_pool_chunk *chunk)
+{
+       return pool->resource[chunk->resource_idx]->base_id;
+}
+
+static inline u32
+mlx5hws_pool_chunk_get_base_mirror_id(struct mlx5hws_pool *pool,
+                                     struct mlx5hws_pool_chunk *chunk)
+{
+       return pool->mirror_resource[chunk->resource_idx]->base_id;
+}
+#endif /* MLX5HWS_POOL_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
new file mode 100644 (file)
index 0000000..de92cec
--- /dev/null
@@ -0,0 +1,514 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5_PRM_H_
+#define MLX5_PRM_H_
+
+#define MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY 512
+
+/* Action type of header modification. Values are explicit because they
+ * are part of the device interface - do not renumber.
+ */
+enum {
+       MLX5_MODIFICATION_TYPE_SET = 0x1,
+       MLX5_MODIFICATION_TYPE_ADD = 0x2,
+       MLX5_MODIFICATION_TYPE_COPY = 0x3,
+       MLX5_MODIFICATION_TYPE_INSERT = 0x4,
+       MLX5_MODIFICATION_TYPE_REMOVE = 0x5,
+       MLX5_MODIFICATION_TYPE_NOP = 0x6,
+       MLX5_MODIFICATION_TYPE_REMOVE_WORDS = 0x7,
+       MLX5_MODIFICATION_TYPE_ADD_FIELD = 0x8,
+       MLX5_MODIFICATION_TYPE_MAX,
+};
+
+/* The field of packet to be modified. Values are device field IDs; they
+ * are not all in ascending order, so keep each explicit value as-is.
+ */
+enum mlx5_modification_field {
+       MLX5_MODI_OUT_NONE = -1,
+       MLX5_MODI_OUT_SMAC_47_16 = 1,
+       MLX5_MODI_OUT_SMAC_15_0,
+       MLX5_MODI_OUT_ETHERTYPE,
+       MLX5_MODI_OUT_DMAC_47_16,
+       MLX5_MODI_OUT_DMAC_15_0,
+       MLX5_MODI_OUT_IP_DSCP,
+       MLX5_MODI_OUT_TCP_FLAGS,
+       MLX5_MODI_OUT_TCP_SPORT,
+       MLX5_MODI_OUT_TCP_DPORT,
+       MLX5_MODI_OUT_IPV4_TTL,
+       MLX5_MODI_OUT_UDP_SPORT,
+       MLX5_MODI_OUT_UDP_DPORT,
+       MLX5_MODI_OUT_SIPV6_127_96,
+       MLX5_MODI_OUT_SIPV6_95_64,
+       MLX5_MODI_OUT_SIPV6_63_32,
+       MLX5_MODI_OUT_SIPV6_31_0,
+       MLX5_MODI_OUT_DIPV6_127_96,
+       MLX5_MODI_OUT_DIPV6_95_64,
+       MLX5_MODI_OUT_DIPV6_63_32,
+       MLX5_MODI_OUT_DIPV6_31_0,
+       MLX5_MODI_OUT_SIPV4,
+       MLX5_MODI_OUT_DIPV4,
+       MLX5_MODI_OUT_FIRST_VID,
+       MLX5_MODI_IN_SMAC_47_16 = 0x31,
+       MLX5_MODI_IN_SMAC_15_0,
+       MLX5_MODI_IN_ETHERTYPE,
+       MLX5_MODI_IN_DMAC_47_16,
+       MLX5_MODI_IN_DMAC_15_0,
+       MLX5_MODI_IN_IP_DSCP,
+       MLX5_MODI_IN_TCP_FLAGS,
+       MLX5_MODI_IN_TCP_SPORT,
+       MLX5_MODI_IN_TCP_DPORT,
+       MLX5_MODI_IN_IPV4_TTL,
+       MLX5_MODI_IN_UDP_SPORT,
+       MLX5_MODI_IN_UDP_DPORT,
+       MLX5_MODI_IN_SIPV6_127_96,
+       MLX5_MODI_IN_SIPV6_95_64,
+       MLX5_MODI_IN_SIPV6_63_32,
+       MLX5_MODI_IN_SIPV6_31_0,
+       MLX5_MODI_IN_DIPV6_127_96,
+       MLX5_MODI_IN_DIPV6_95_64,
+       MLX5_MODI_IN_DIPV6_63_32,
+       MLX5_MODI_IN_DIPV6_31_0,
+       MLX5_MODI_IN_SIPV4,
+       MLX5_MODI_IN_DIPV4,
+       MLX5_MODI_OUT_IPV6_HOPLIMIT,
+       MLX5_MODI_IN_IPV6_HOPLIMIT,
+       MLX5_MODI_META_DATA_REG_A,
+       MLX5_MODI_META_DATA_REG_B = 0x50,
+       MLX5_MODI_META_REG_C_0,
+       MLX5_MODI_META_REG_C_1,
+       MLX5_MODI_META_REG_C_2,
+       MLX5_MODI_META_REG_C_3,
+       MLX5_MODI_META_REG_C_4,
+       MLX5_MODI_META_REG_C_5,
+       MLX5_MODI_META_REG_C_6,
+       MLX5_MODI_META_REG_C_7,
+       MLX5_MODI_OUT_TCP_SEQ_NUM,
+       MLX5_MODI_IN_TCP_SEQ_NUM,
+       MLX5_MODI_OUT_TCP_ACK_NUM,
+       MLX5_MODI_IN_TCP_ACK_NUM = 0x5C,
+       MLX5_MODI_GTP_TEID = 0x6E,
+       MLX5_MODI_OUT_IP_ECN = 0x73,
+       MLX5_MODI_TUNNEL_HDR_DW_1 = 0x75,
+       MLX5_MODI_GTPU_FIRST_EXT_DW_0 = 0x76,
+       MLX5_MODI_HASH_RESULT = 0x81,
+       MLX5_MODI_IN_MPLS_LABEL_0 = 0x8a,
+       MLX5_MODI_IN_MPLS_LABEL_1,
+       MLX5_MODI_IN_MPLS_LABEL_2,
+       MLX5_MODI_IN_MPLS_LABEL_3,
+       MLX5_MODI_IN_MPLS_LABEL_4,
+       /* Out-of-numeric-order entries below - explicit device field IDs */
+       MLX5_MODI_OUT_IP_PROTOCOL = 0x4A,
+       MLX5_MODI_OUT_IPV6_NEXT_HDR = 0x4A,
+       MLX5_MODI_META_REG_C_8 = 0x8F,
+       MLX5_MODI_META_REG_C_9 = 0x90,
+       MLX5_MODI_META_REG_C_10 = 0x91,
+       MLX5_MODI_META_REG_C_11 = 0x92,
+       MLX5_MODI_META_REG_C_12 = 0x93,
+       MLX5_MODI_META_REG_C_13 = 0x94,
+       MLX5_MODI_META_REG_C_14 = 0x95,
+       MLX5_MODI_META_REG_C_15 = 0x96,
+       MLX5_MODI_OUT_IPV4_TOTAL_LEN = 0x11D,
+       MLX5_MODI_OUT_IPV6_PAYLOAD_LEN = 0x11E,
+       MLX5_MODI_OUT_IPV4_IHL = 0x11F,
+       MLX5_MODI_OUT_TCP_DATA_OFFSET = 0x120,
+       MLX5_MODI_OUT_ESP_SPI = 0x5E,
+       MLX5_MODI_OUT_ESP_SEQ_NUM = 0x82,
+       MLX5_MODI_OUT_IPSEC_NEXT_HDR = 0x126,
+       MLX5_MODI_INVALID = INT_MAX,
+};
+
+/* [GET|SET]_HCA_CAP op_mod values, pre-shifted into their field position
+ * (hence the << 1).
+ */
+enum {
+       MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE = 0x7 << 1,
+       MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE = 0x8 << 1,
+       MLX5_SET_HCA_CAP_OP_MOD_ESW = 0x9 << 1,
+       MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE = 0x1B << 1,
+       MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 = 0x20 << 1,
+};
+
+/* How STEs in an RTC are updated: by hash of the tag or by explicit offset. */
+enum mlx5_ifc_rtc_update_mode {
+       MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH = 0x0,
+       MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET = 0x1,
+};
+
+/* How STEs in an RTC are looked up on packet processing. */
+enum mlx5_ifc_rtc_access_mode {
+       MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH = 0x0,
+       MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR = 0x1,
+};
+
+/* STE formats supported by the RTC. */
+enum mlx5_ifc_rtc_ste_format {
+       MLX5_IFC_RTC_STE_FORMAT_8DW = 0x4,
+       MLX5_IFC_RTC_STE_FORMAT_11DW = 0x5,
+       MLX5_IFC_RTC_STE_FORMAT_RANGE = 0x7,
+};
+
+/* Packet reparse behavior after RTC processing. */
+enum mlx5_ifc_rtc_reparse_mode {
+       MLX5_IFC_RTC_REPARSE_NEVER = 0x0,
+       MLX5_IFC_RTC_REPARSE_ALWAYS = 0x1,
+       MLX5_IFC_RTC_REPARSE_BY_STC = 0x2,
+};
+
+#define MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX 16
+
+/* Bit-level layout of the RTC general object (mlx5_ifc style: field sizes
+ * in bits, accessed via MLX5_SET/MLX5_GET). Do not reorder or resize.
+ */
+struct mlx5_ifc_rtc_bits {
+       u8 modify_field_select[0x40];
+       u8 reserved_at_40[0x40];
+       u8 update_index_mode[0x2];
+       u8 reparse_mode[0x2];
+       u8 num_match_ste[0x4];
+       u8 pd[0x18];
+       u8 reserved_at_a0[0x9];
+       u8 access_index_mode[0x3];
+       u8 num_hash_definer[0x4];
+       u8 update_method[0x1];
+       u8 reserved_at_b1[0x2];
+       u8 log_depth[0x5];
+       u8 log_hash_size[0x8];
+       u8 ste_format_0[0x8];
+       u8 table_type[0x8];
+       u8 ste_format_1[0x8];
+       u8 reserved_at_d8[0x8];
+       u8 match_definer_0[0x20];
+       u8 stc_id[0x20];
+       u8 ste_table_base_id[0x20];
+       u8 ste_table_offset[0x20];
+       u8 reserved_at_160[0x8];
+       u8 miss_flow_table_id[0x18];
+       u8 match_definer_1[0x20];
+       u8 reserved_at_1a0[0x260];
+};
+
+/* STC action types. Values up to 0x1b are packet-rewrite/accounting
+ * actions; 0x80 and above are jump/termination actions.
+ */
+enum mlx5_ifc_stc_action_type {
+       MLX5_IFC_STC_ACTION_TYPE_NOP = 0x00,
+       MLX5_IFC_STC_ACTION_TYPE_COPY = 0x05,
+       MLX5_IFC_STC_ACTION_TYPE_SET = 0x06,
+       MLX5_IFC_STC_ACTION_TYPE_ADD = 0x07,
+       MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS = 0x08,
+       MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE = 0x09,
+       MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT = 0x0b,
+       MLX5_IFC_STC_ACTION_TYPE_TAG = 0x0c,
+       MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST = 0x0e,
+       MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_ENCRYPTION = 0x10,
+       MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_DECRYPTION = 0x11,
+       MLX5_IFC_STC_ACTION_TYPE_ASO = 0x12,
+       MLX5_IFC_STC_ACTION_TYPE_TRAILER = 0x13,
+       MLX5_IFC_STC_ACTION_TYPE_COUNTER = 0x14,
+       MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD = 0x1b,
+       MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE = 0x80,
+       MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR = 0x81,
+       MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT = 0x82,
+       MLX5_IFC_STC_ACTION_TYPE_DROP = 0x83,
+       MLX5_IFC_STC_ACTION_TYPE_ALLOW = 0x84,
+       MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT = 0x85,
+       MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK = 0x86,
+};
+
+/* Per-STC reparse override (see also enum mlx5_ifc_rtc_reparse_mode). */
+enum mlx5_ifc_stc_reparse_mode {
+       MLX5_IFC_STC_REPARSE_IGNORE = 0x0,
+       MLX5_IFC_STC_REPARSE_NEVER = 0x1,
+       MLX5_IFC_STC_REPARSE_ALWAYS = 0x2,
+};
+
+/* Parameter layouts for the various STC action types; one of these is
+ * placed in the stc_param union of struct mlx5_ifc_stc_bits.
+ */
+struct mlx5_ifc_stc_ste_param_ste_table_bits {
+       u8 ste_obj_id[0x20];
+       u8 match_definer_id[0x20];
+       u8 reserved_at_40[0x3];
+       u8 log_hash_size[0x5];
+       u8 reserved_at_48[0x38];
+};
+
+struct mlx5_ifc_stc_ste_param_tir_bits {
+       u8 reserved_at_0[0x8];
+       u8 tirn[0x18];
+       u8 reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_stc_ste_param_table_bits {
+       u8 reserved_at_0[0x8];
+       u8 table_id[0x18];
+       u8 reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_stc_ste_param_flow_counter_bits {
+       u8 flow_counter_id[0x20];
+};
+
+/* Number of entries packed into a single ASO object, per ASO type. */
+enum {
+       MLX5_ASO_CT_NUM_PER_OBJ = 1,
+       MLX5_ASO_METER_NUM_PER_OBJ = 2,
+       MLX5_ASO_IPSEC_NUM_PER_OBJ = 1,
+       MLX5_ASO_FIRST_HIT_NUM_PER_OBJ = 512,
+};
+
+struct mlx5_ifc_stc_ste_param_execute_aso_bits {
+       u8 aso_object_id[0x20];
+       u8 return_reg_id[0x4];
+       u8 aso_type[0x4];
+       u8 reserved_at_28[0x18];
+};
+
+struct mlx5_ifc_stc_ste_param_ipsec_encrypt_bits {
+       u8 ipsec_object_id[0x20];
+};
+
+struct mlx5_ifc_stc_ste_param_ipsec_decrypt_bits {
+       u8 ipsec_object_id[0x20];
+};
+
+struct mlx5_ifc_stc_ste_param_trailer_bits {
+       u8 reserved_at_0[0x8];
+       u8 command[0x4];
+       u8 reserved_at_c[0x2];
+       u8 type[0x2];
+       u8 reserved_at_10[0xa];
+       u8 length[0x6];
+};
+
+struct mlx5_ifc_stc_ste_param_header_modify_list_bits {
+       u8 header_modify_pattern_id[0x20];
+       u8 header_modify_argument_id[0x20];
+};
+
+/* Packet header anchor points, used by the insert/remove action params
+ * below to address a position in the packet.
+ */
+enum mlx5_ifc_header_anchors {
+       MLX5_HEADER_ANCHOR_PACKET_START = 0x0,
+       MLX5_HEADER_ANCHOR_MAC = 0x1,
+       MLX5_HEADER_ANCHOR_FIRST_VLAN_START = 0x2,
+       MLX5_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
+       MLX5_HEADER_ANCHOR_ESP = 0x08,
+       MLX5_HEADER_ANCHOR_TCP_UDP = 0x09,
+       MLX5_HEADER_ANCHOR_TUNNEL_HEADER = 0x0a,
+       MLX5_HEADER_ANCHOR_INNER_MAC = 0x13,
+       MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
+       MLX5_HEADER_ANCHOR_INNER_TCP_UDP = 0x1a,
+       MLX5_HEADER_ANCHOR_L4_PAYLOAD = 0x1b,
+       MLX5_HEADER_ANCHOR_INNER_L4_PAYLOAD = 0x1c
+};
+
+/* Header remove action: strip everything between two header anchors. */
+struct mlx5_ifc_stc_ste_param_remove_bits {
+       u8 action_type[0x4];
+       u8 decap[0x1];
+       u8 reserved_at_5[0x5];
+       u8 remove_start_anchor[0x6];
+       u8 reserved_at_10[0x2];
+       u8 remove_end_anchor[0x6];
+       u8 reserved_at_18[0x8];
+};
+
+/* Remove-words action: strip a word-granular span from an anchor. */
+struct mlx5_ifc_stc_ste_param_remove_words_bits {
+       u8 action_type[0x4];
+       u8 reserved_at_4[0x6];
+       u8 remove_start_anchor[0x6];
+       u8 reserved_at_10[0x1];
+       u8 remove_offset[0x7];
+       u8 reserved_at_18[0x2];
+       u8 remove_size[0x6];
+};
+
+/* Header insert action: insert data (inline or by argument) at an anchor. */
+struct mlx5_ifc_stc_ste_param_insert_bits {
+       u8 action_type[0x4];
+       u8 encap[0x1];
+       u8 inline_data[0x1];
+       u8 reserved_at_6[0x4];
+       u8 insert_anchor[0x6];
+       u8 reserved_at_10[0x1];
+       u8 insert_offset[0x7];
+       u8 reserved_at_18[0x1];
+       u8 insert_size[0x7];
+       u8 insert_argument[0x20];
+};
+
+/* Jump-to-vport action parameters. */
+struct mlx5_ifc_stc_ste_param_vport_bits {
+       u8 eswitch_owner_vhca_id[0x10];
+       u8 vport_number[0x10];
+       u8 eswitch_owner_vhca_id_valid[0x1];
+       u8 reserved_at_21[0x5f];
+};
+
+/* Union of all STC action parameter layouts; 0x80 bits total, as pinned
+ * by the trailing reserved member.
+ */
+union mlx5_ifc_stc_param_bits {
+       struct mlx5_ifc_stc_ste_param_ste_table_bits ste_table;
+       struct mlx5_ifc_stc_ste_param_tir_bits tir;
+       struct mlx5_ifc_stc_ste_param_table_bits table;
+       struct mlx5_ifc_stc_ste_param_flow_counter_bits counter;
+       struct mlx5_ifc_stc_ste_param_header_modify_list_bits modify_header;
+       struct mlx5_ifc_stc_ste_param_execute_aso_bits aso;
+       struct mlx5_ifc_stc_ste_param_remove_bits remove_header;
+       struct mlx5_ifc_stc_ste_param_insert_bits insert_header;
+       struct mlx5_ifc_set_action_in_bits add;
+       struct mlx5_ifc_set_action_in_bits set;
+       struct mlx5_ifc_copy_action_in_bits copy;
+       struct mlx5_ifc_stc_ste_param_vport_bits vport;
+       struct mlx5_ifc_stc_ste_param_ipsec_encrypt_bits ipsec_encrypt;
+       struct mlx5_ifc_stc_ste_param_ipsec_decrypt_bits ipsec_decrypt;
+       struct mlx5_ifc_stc_ste_param_trailer_bits trailer;
+       u8 reserved_at_0[0x80];
+};
+
+/* modify_field_select bits for MODIFY_STC. */
+enum {
+       MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC = BIT(0),
+};
+
+/* Bit-level layout of the STC (steering context) general object. */
+struct mlx5_ifc_stc_bits {
+       u8 modify_field_select[0x40];
+       u8 reserved_at_40[0x46];
+       u8 reparse_mode[0x2];
+       u8 table_type[0x8];
+       u8 ste_action_offset[0x8];
+       u8 action_type[0x8];
+       u8 reserved_at_a0[0x60];
+       union mlx5_ifc_stc_param_bits stc_param;
+       u8 reserved_at_180[0x280];
+};
+
+/* Bit-level layout of the STE general object. */
+struct mlx5_ifc_ste_bits {
+       u8 modify_field_select[0x40];
+       u8 reserved_at_40[0x48];
+       u8 table_type[0x8];
+       u8 reserved_at_90[0x370];
+};
+
+/* Bit-level layout of the match definer general object. Note the DW/byte
+ * selector fields appear in device order, not ascending index order.
+ */
+struct mlx5_ifc_definer_bits {
+       u8 modify_field_select[0x40];
+       u8 reserved_at_40[0x50];
+       u8 format_id[0x10];
+       u8 reserved_at_60[0x60];
+       u8 format_select_dw3[0x8];
+       u8 format_select_dw2[0x8];
+       u8 format_select_dw1[0x8];
+       u8 format_select_dw0[0x8];
+       u8 format_select_dw7[0x8];
+       u8 format_select_dw6[0x8];
+       u8 format_select_dw5[0x8];
+       u8 format_select_dw4[0x8];
+       u8 reserved_at_100[0x18];
+       u8 format_select_dw8[0x8];
+       u8 reserved_at_120[0x20];
+       u8 format_select_byte3[0x8];
+       u8 format_select_byte2[0x8];
+       u8 format_select_byte1[0x8];
+       u8 format_select_byte0[0x8];
+       u8 format_select_byte7[0x8];
+       u8 format_select_byte6[0x8];
+       u8 format_select_byte5[0x8];
+       u8 format_select_byte4[0x8];
+       u8 reserved_at_180[0x40];
+       u8 ctrl[0xa0];
+       u8 match_mask[0x160];
+};
+
+/* Bit-level layout of the modify-header argument general object. */
+struct mlx5_ifc_arg_bits {
+       u8 rsvd0[0x88];
+       u8 access_pd[0x18];
+};
+
+/* Bit-level layout of the modify-header pattern general object. */
+struct mlx5_ifc_header_modify_pattern_in_bits {
+       u8 modify_field_select[0x40];
+
+       u8 reserved_at_40[0x40];
+
+       u8 pattern_length[0x8];
+       u8 reserved_at_88[0x18];
+
+       u8 reserved_at_a0[0x60];
+
+       u8 pattern_data[MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY * 8];
+};
+
+/* CREATE_GENERAL_OBJECT command inputs: the common general-object command
+ * header followed by each object's layout.
+ */
+struct mlx5_ifc_create_rtc_in_bits {
+       struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+       struct mlx5_ifc_rtc_bits rtc;
+};
+
+struct mlx5_ifc_create_stc_in_bits {
+       struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+       struct mlx5_ifc_stc_bits stc;
+};
+
+struct mlx5_ifc_create_ste_in_bits {
+       struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+       struct mlx5_ifc_ste_bits ste;
+};
+
+struct mlx5_ifc_create_definer_in_bits {
+       struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+       struct mlx5_ifc_definer_bits definer;
+};
+
+struct mlx5_ifc_create_arg_in_bits {
+       struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+       struct mlx5_ifc_arg_bits arg;
+};
+
+struct mlx5_ifc_create_header_modify_pattern_in_bits {
+       struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+       struct mlx5_ifc_header_modify_pattern_in_bits pattern;
+};
+
+/* GENERATE_WQE command input: carries the WQE control/GTA segments to be
+ * executed by the device.
+ */
+struct mlx5_ifc_generate_wqe_in_bits {
+       u8 opcode[0x10];
+       u8 uid[0x10];
+       u8 reserved_at_20[0x10];
+       u8 op_mode[0x10];
+       u8 reserved_at_40[0x40];
+       u8 reserved_at_80[0x8];
+       u8 pdn[0x18];
+       u8 reserved_at_a0[0x160];
+       u8 wqe_ctrl[0x80];
+       u8 wqe_gta_ctrl[0x180];
+       u8 wqe_gta_data_0[0x200];
+       u8 wqe_gta_data_1[0x200];
+};
+
+/* GENERATE_WQE command output: resulting CQE data. */
+struct mlx5_ifc_generate_wqe_out_bits {
+       u8 status[0x8];
+       u8 reserved_at_8[0x18];
+       u8 syndrome[0x20];
+       u8 reserved_at_40[0x1c0];
+       u8 cqe_data[0x200];
+};
+
+/* ACCESS_ASO opcode modifiers, selecting the ASO flavor. */
+enum mlx5_access_aso_opc_mod {
+       ASO_OPC_MOD_IPSEC = 0x0,
+       ASO_OPC_MOD_CONNECTION_TRACKING = 0x1,
+       ASO_OPC_MOD_POLICER = 0x2,
+       ASO_OPC_MOD_RACE_AVOIDANCE = 0x3,
+       ASO_OPC_MOD_FLOW_HIT = 0x4,
+};
+
+/* MODIFY_FLOW_TABLE field-select bits. */
+enum {
+       MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION = BIT(0),
+       MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID = BIT(1),
+};
+
+/* Flow table miss action values. */
+enum {
+       MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT = 0,
+       MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL = 1,
+};
+
+/* ALLOC_PACKET_REFORMAT_CONTEXT command output. */
+struct mlx5_ifc_alloc_packet_reformat_out_bits {
+       u8 status[0x8];
+       u8 reserved_at_8[0x18];
+
+       u8 syndrome[0x20];
+
+       u8 packet_reformat_id[0x20];
+
+       u8 reserved_at_60[0x20];
+};
+
+/* DEALLOC_PACKET_REFORMAT_CONTEXT command input. */
+struct mlx5_ifc_dealloc_packet_reformat_in_bits {
+       u8 opcode[0x10];
+       u8 reserved_at_10[0x10];
+
+       u8 reserved_at_20[0x10];
+       u8 op_mod[0x10];
+
+       u8 packet_reformat_id[0x20];
+
+       u8 reserved_at_60[0x20];
+};
+
+/* DEALLOC_PACKET_REFORMAT_CONTEXT command output. */
+struct mlx5_ifc_dealloc_packet_reformat_out_bits {
+       u8 status[0x8];
+       u8 reserved_at_8[0x18];
+
+       u8 syndrome[0x20];
+
+       u8 reserved_at_40[0x40];
+};
+
+#endif /* MLX5_PRM_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
new file mode 100644 (file)
index 0000000..e20c67a
--- /dev/null
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+/* Decide whether an FDB rule should skip its RX and/or TX side.
+ * An explicit per-rule flow source wins; otherwise fall back to the
+ * matcher-level flow source optimization, if one was configured.
+ */
+static void hws_rule_skip(struct mlx5hws_matcher *matcher,
+                         struct mlx5hws_match_template *mt,
+                         u32 flow_source,
+                         bool *skip_rx, bool *skip_tx)
+{
+       /* By default FDB rules are added to both RX and TX */
+       *skip_rx = false;
+       *skip_tx = false;
+
+       switch (flow_source) {
+       case MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT:
+               *skip_rx = true;
+               break;
+       case MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK:
+               *skip_tx = true;
+               break;
+       default:
+               /* No flow source on the rule - use the matcher attribute */
+               if (matcher->attr.optimize_flow_src) {
+                       *skip_tx = matcher->attr.optimize_flow_src ==
+                                  MLX5HWS_MATCHER_FLOW_SRC_WIRE;
+                       *skip_rx = matcher->attr.optimize_flow_src ==
+                                  MLX5HWS_MATCHER_FLOW_SRC_VPORT;
+               }
+               break;
+       }
+}
+
+/* Copy the rule's saved match tag into @wqe_data for a rule update.
+ * Non-resizable rules keep the tag in rule->tag; resizable rules keep it
+ * inside the saved data segment (resize_info->data_seg), in the action
+ * area - see hws_rule_save_resize_info().
+ */
+static void
+hws_rule_update_copy_tag(struct mlx5hws_rule *rule,
+                        struct mlx5hws_wqe_gta_data_seg_ste *wqe_data,
+                        bool is_jumbo)
+{
+       struct mlx5hws_rule_match_tag *tag;
+
+       if (!mlx5hws_matcher_is_resizable(rule->matcher)) {
+               tag = &rule->tag;
+       } else {
+               struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
+                       (struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
+               tag = (struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
+       }
+
+       if (is_jumbo)
+               memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ);
+       else
+               memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ);
+}
+
+/* Fill a dependent WQE with the rule's identity and the RTC IDs it should
+ * be written to. RX/TX sides that hws_rule_skip() marks as skipped get
+ * zeroed RTC IDs. Only FDB tables are handled; anything else is a bug.
+ */
+static void hws_rule_init_dep_wqe(struct mlx5hws_send_ring_dep_wqe *dep_wqe,
+                                 struct mlx5hws_rule *rule,
+                                 struct mlx5hws_match_template *mt,
+                                 struct mlx5hws_rule_attr *attr)
+{
+       struct mlx5hws_matcher *matcher = rule->matcher;
+       struct mlx5hws_table *tbl = matcher->tbl;
+       bool skip_rx, skip_tx;
+
+       dep_wqe->rule = rule;
+       dep_wqe->user_data = attr->user_data;
+       /* Insert-by-index matchers write to an explicit slot */
+       dep_wqe->direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
+                               attr->rule_idx : 0;
+
+       if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+               hws_rule_skip(matcher, mt, attr->flow_source, &skip_rx, &skip_tx);
+
+               if (!skip_rx) {
+                       dep_wqe->rtc_0 = matcher->match_ste.rtc_0_id;
+                       /* Retry on the collision matcher's RTC, if one exists */
+                       dep_wqe->retry_rtc_0 = matcher->col_matcher ?
+                                              matcher->col_matcher->match_ste.rtc_0_id : 0;
+               } else {
+                       dep_wqe->rtc_0 = 0;
+                       dep_wqe->retry_rtc_0 = 0;
+               }
+
+               if (!skip_tx) {
+                       dep_wqe->rtc_1 = matcher->match_ste.rtc_1_id;
+                       dep_wqe->retry_rtc_1 = matcher->col_matcher ?
+                                              matcher->col_matcher->match_ste.rtc_1_id : 0;
+               } else {
+                       dep_wqe->rtc_1 = 0;
+                       dep_wqe->retry_rtc_1 = 0;
+               }
+       } else {
+               pr_warn("HWS: invalid tbl->type: %d\n", tbl->type);
+       }
+}
+
+/* For a rule being moved during matcher resize, select the destination
+ * matcher's RTC IDs. A nonzero saved rtc_0/rtc_1 in resize_info marks
+ * which sides the original rule occupied, and only those are re-targeted.
+ */
+static void hws_rule_move_get_rtc(struct mlx5hws_rule *rule,
+                                 struct mlx5hws_send_ste_attr *ste_attr)
+{
+       struct mlx5hws_matcher *dst_matcher = rule->matcher->resize_dst;
+
+       if (rule->resize_info->rtc_0) {
+               ste_attr->rtc_0 = dst_matcher->match_ste.rtc_0_id;
+               ste_attr->retry_rtc_0 = dst_matcher->col_matcher ?
+                                       dst_matcher->col_matcher->match_ste.rtc_0_id : 0;
+       }
+       if (rule->resize_info->rtc_1) {
+               ste_attr->rtc_1 = dst_matcher->match_ste.rtc_1_id;
+               ste_attr->retry_rtc_1 = dst_matcher->col_matcher ?
+                                       dst_matcher->col_matcher->match_ste.rtc_1_id : 0;
+       }
+}
+
+/* Update the rule status according to @err and generate a completion on
+ * @queue carrying @user_data and the matching operation status.
+ */
+static void hws_rule_gen_comp(struct mlx5hws_send_engine *queue,
+                             struct mlx5hws_rule *rule,
+                             bool err,
+                             void *user_data,
+                             enum mlx5hws_rule_status rule_status_on_succ)
+{
+       enum mlx5hws_flow_op_status comp_status;
+
+       if (err) {
+               rule->status = MLX5HWS_RULE_STATUS_FAILED;
+               comp_status = MLX5HWS_FLOW_OP_ERROR;
+       } else {
+               rule->status = rule_status_on_succ;
+               comp_status = MLX5HWS_FLOW_OP_SUCCESS;
+       }
+
+       mlx5hws_send_engine_inc_rule(queue);
+       mlx5hws_send_engine_gen_comp(queue, user_data, comp_status);
+}
+
+/* Save the data a resizable rule needs to be moved/deleted later: copies
+ * of the WQE control and data segments, plus (on initial create) the
+ * action STE sizing and pools. No-op for non-resizable matchers.
+ */
+static void
+hws_rule_save_resize_info(struct mlx5hws_rule *rule,
+                         struct mlx5hws_send_ste_attr *ste_attr,
+                         bool is_update)
+{
+       if (!mlx5hws_matcher_is_resizable(rule->matcher))
+               return;
+
+       if (likely(!is_update)) {
+               rule->resize_info = kzalloc(sizeof(*rule->resize_info), GFP_KERNEL);
+               if (unlikely(!rule->resize_info)) {
+                       /* NOTE(review): allocation failure leaves resize_info
+                        * NULL and only warns; later resize paths appear to
+                        * depend on it - confirm callers tolerate this.
+                        */
+                       pr_warn("HWS: resize info isn't allocated for rule\n");
+                       return;
+               }
+
+               rule->resize_info->max_stes =
+                       rule->matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
+               /* Keep the original pools so STEs can be freed even after
+                * the rule moves to a resized matcher.
+                */
+               rule->resize_info->action_ste_pool[0] = rule->matcher->action_ste[0].max_stes ?
+                                                       rule->matcher->action_ste[0].pool :
+                                                       NULL;
+               rule->resize_info->action_ste_pool[1] = rule->matcher->action_ste[1].max_stes ?
+                                                       rule->matcher->action_ste[1].pool :
+                                                       NULL;
+       }
+
+       memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl,
+              sizeof(rule->resize_info->ctrl_seg));
+       memcpy(rule->resize_info->data_seg, ste_attr->wqe_data,
+              sizeof(rule->resize_info->data_seg));
+}
+
+/* Release the resize bookkeeping of a rule on a resizable matcher.
+ * Safe to call when no resize info was ever allocated.
+ */
+void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule)
+{
+       if (!mlx5hws_matcher_is_resizable(rule->matcher))
+               return;
+
+       kfree(rule->resize_info);
+       rule->resize_info = NULL;
+}
+
+/* Save the match tag needed to delete the rule later. Resizable rules
+ * skip this - their tag lives inside resize_info instead (see
+ * hws_rule_save_resize_info() and hws_rule_load_delete_info()).
+ */
+static void
+hws_rule_save_delete_info(struct mlx5hws_rule *rule,
+                         struct mlx5hws_send_ste_attr *ste_attr)
+{
+       struct mlx5hws_match_template *mt = rule->matcher->mt;
+       bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+
+       if (mlx5hws_matcher_is_resizable(rule->matcher))
+               return;
+
+       if (is_jumbo)
+               memcpy(&rule->tag.jumbo, ste_attr->wqe_data->jumbo, MLX5HWS_JUMBO_TAG_SZ);
+       else
+               memcpy(&rule->tag.match, ste_attr->wqe_data->tag, MLX5HWS_MATCH_TAG_SZ);
+}
+
+/* Counterpart of hws_rule_save_delete_info(); the saved tag is embedded
+ * in the rule, so there is nothing to release.
+ */
+static void
+hws_rule_clear_delete_info(struct mlx5hws_rule *rule)
+{
+       /* nothing to do here */
+}
+
+/* Point @ste_attr->wqe_tag at the rule's saved match tag: rule->tag for
+ * non-resizable rules, or the tag embedded in the saved data segment for
+ * resizable ones (mirrors hws_rule_update_copy_tag()).
+ */
+static void
+hws_rule_load_delete_info(struct mlx5hws_rule *rule,
+                         struct mlx5hws_send_ste_attr *ste_attr)
+{
+       if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher))) {
+               ste_attr->wqe_tag = &rule->tag;
+       } else {
+               struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
+                       (struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
+               struct mlx5hws_rule_match_tag *tag =
+                       (struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
+               ste_attr->wqe_tag = tag;
+       }
+}
+
+/* Allocate a chunk of action STEs for the rule from the selected action
+ * STE pool. The chunk order covers max_stes rounded up to a power of two;
+ * the resulting offset is recorded in rule->action_ste_idx.
+ * Returns 0 on success or a negative errno.
+ */
+static int hws_rule_alloc_action_ste_idx(struct mlx5hws_rule *rule,
+                                        u8 action_ste_selector)
+{
+       struct mlx5hws_matcher *matcher = rule->matcher;
+       struct mlx5hws_matcher_action_ste *action_ste;
+       struct mlx5hws_pool_chunk ste = {0};
+       int ret;
+
+       action_ste = &matcher->action_ste[action_ste_selector];
+       ste.order = ilog2(roundup_pow_of_two(action_ste->max_stes));
+       ret = mlx5hws_pool_chunk_alloc(action_ste->pool, &ste);
+       if (unlikely(ret)) {
+               mlx5hws_err(matcher->tbl->ctx,
+                           "Failed to allocate STE for rule actions");
+               return ret;
+       }
+       rule->action_ste_idx = ste.offset;
+
+       return 0;
+}
+
+/* Return the rule's action STE chunk to the pool it came from. For a
+ * resizable matcher the pool and sizing saved in resize_info are used,
+ * since the rule may have moved to a resized matcher after allocation.
+ */
+static void hws_rule_free_action_ste_idx(struct mlx5hws_rule *rule,
+                                        u8 action_ste_selector)
+{
+       struct mlx5hws_matcher *matcher = rule->matcher;
+       struct mlx5hws_pool_chunk ste = {0};
+       struct mlx5hws_pool *pool;
+       u8 max_stes;
+
+       if (mlx5hws_matcher_is_resizable(matcher)) {
+               /* Free the original action pool if rule was resized */
+               max_stes = rule->resize_info->max_stes;
+               pool = rule->resize_info->action_ste_pool[action_ste_selector];
+       } else {
+               max_stes = matcher->action_ste[action_ste_selector].max_stes;
+               pool = matcher->action_ste[action_ste_selector].pool;
+       }
+
+       /* This release is safe only when the rule match part was deleted */
+       ste.order = ilog2(roundup_pow_of_two(max_stes));
+       ste.offset = rule->action_ste_idx;
+
+       mlx5hws_pool_chunk_free(pool, &ste);
+}
+
+/* Allocate action STEs from both selector pools (0 and 1) for the rule.
+ * @attr is currently unused. Returns 0 on success or a negative errno.
+ */
+static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule,
+                                    struct mlx5hws_rule_attr *attr)
+{
+       int action_ste_idx;
+       int ret;
+
+       ret = hws_rule_alloc_action_ste_idx(rule, 0);
+       if (unlikely(ret))
+               return ret;
+
+       action_ste_idx = rule->action_ste_idx;
+
+       ret = hws_rule_alloc_action_ste_idx(rule, 1);
+       if (unlikely(ret)) {
+               hws_rule_free_action_ste_idx(rule, 0);
+               return ret;
+       }
+
+       /* Both pools have to return the same index */
+       if (unlikely(rule->action_ste_idx != action_ste_idx)) {
+               /* NOTE(review): on mismatch the two chunks allocated above are
+                * not freed here; presumably they are reclaimed later through
+                * mlx5hws_rule_free_action_ste() since action_ste_idx is set -
+                * confirm against the error path of the caller.
+                */
+               pr_warn("HWS: allocation of action STE failed - pool indexes mismatch\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Release the action STEs reserved for this rule, if any were allocated
+ * (action_ste_idx is -1 otherwise).
+ */
+void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule)
+{
+       if (rule->action_ste_idx < 0)
+               return;
+
+       /* Free in reverse order of allocation */
+       hws_rule_free_action_ste_idx(rule, 1);
+       hws_rule_free_action_ste_idx(rule, 0);
+}
+
+/* Reset per-operation rule state and preset the default send-STE
+ * attributes and action-apply data for a create or update operation.
+ */
+static void hws_rule_create_init(struct mlx5hws_rule *rule,
+                                struct mlx5hws_send_ste_attr *ste_attr,
+                                struct mlx5hws_actions_apply_data *apply,
+                                bool is_update)
+{
+       struct mlx5hws_matcher *matcher = rule->matcher;
+       struct mlx5hws_table *tbl = matcher->tbl;
+       struct mlx5hws_context *ctx = tbl->ctx;
+
+       /* Init rule before reuse */
+       if (!is_update) {
+               /* In update we use these rtc's */
+               rule->rtc_0 = 0;
+               rule->rtc_1 = 0;
+               rule->action_ste_selector = 0;
+       } else {
+               /* Updates alternate between the two action STE sets */
+               rule->action_ste_selector = !rule->action_ste_selector;
+       }
+
+       rule->pending_wqes = 0;
+       rule->action_ste_idx = -1; /* -1 = no action STEs allocated yet */
+       rule->status = MLX5HWS_RULE_STATUS_CREATING;
+
+       /* Init default send STE attributes */
+       ste_attr->gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+       ste_attr->send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+       ste_attr->send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+       ste_attr->send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+
+       /* Init default action apply */
+       apply->tbl_type = tbl->type;
+       apply->common_res = &ctx->common_res[tbl->type];
+       apply->jump_to_action_stc = matcher->action_ste[0].stc.offset;
+       apply->require_dep = 0;
+}
+
+/* Prepare a rule for being moved to a resized matcher: stash the current
+ * RTC IDs (needed later to delete the old match STE), reset transient
+ * state, and mark the resize state machine as WRITING.
+ */
+static void hws_rule_move_init(struct mlx5hws_rule *rule,
+                              struct mlx5hws_rule_attr *attr)
+{
+       /* Save the old RTC IDs to be later used in match STE delete */
+       rule->resize_info->rtc_0 = rule->rtc_0;
+       rule->resize_info->rtc_1 = rule->rtc_1;
+       rule->resize_info->rule_idx = attr->rule_idx;
+
+       rule->rtc_0 = 0;
+       rule->rtc_1 = 0;
+
+       rule->pending_wqes = 0;
+       rule->action_ste_idx = -1; /* -1 = no action STEs allocated yet */
+       rule->action_ste_selector = 0;
+       rule->status = MLX5HWS_RULE_STATUS_CREATING;
+       rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_WRITING;
+}
+
+/* True while the rule is mid-move to a resized matcher: the matcher is
+ * resizing, resize info exists, and its state machine is not yet idle.
+ */
+bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule)
+{
+       if (!mlx5hws_matcher_is_in_resize(rule->matcher))
+               return false;
+
+       if (!rule->resize_info)
+               return false;
+
+       return rule->resize_info->state != MLX5HWS_RULE_RESIZE_STATE_IDLE;
+}
+
+static int hws_rule_create_hws(struct mlx5hws_rule *rule,
+                              struct mlx5hws_rule_attr *attr,
+                              u8 mt_idx,
+                              u32 *match_param,
+                              u8 at_idx,
+                              struct mlx5hws_rule_action rule_actions[])
+{
+       struct mlx5hws_action_template *at = &rule->matcher->at[at_idx];
+       struct mlx5hws_match_template *mt = &rule->matcher->mt[mt_idx];
+       bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+       struct mlx5hws_matcher *matcher = rule->matcher;
+       struct mlx5hws_context *ctx = matcher->tbl->ctx;
+       struct mlx5hws_send_ste_attr ste_attr = {0};
+       struct mlx5hws_send_ring_dep_wqe *dep_wqe;
+       struct mlx5hws_actions_wqe_setter *setter;
+       struct mlx5hws_actions_apply_data apply;
+       struct mlx5hws_send_engine *queue;
+       u8 total_stes, action_stes;
+       bool is_update;
+       int i, ret;
+
+       is_update = !match_param;
+
+       setter = &at->setters[at->num_of_action_stes];
+       total_stes = at->num_of_action_stes + (is_jumbo && !at->only_term);
+       action_stes = total_stes - 1;
+
+       queue = &ctx->send_queue[attr->queue_id];
+       if (unlikely(mlx5hws_send_engine_err(queue)))
+               return -EIO;
+
+       hws_rule_create_init(rule, &ste_attr, &apply, is_update);
+
+       /* Allocate dependent match WQE since rule might have dependent writes.
+        * The queued dependent WQE can be later aborted or kept as a dependency.
+        * dep_wqe buffers (ctrl, data) are also reused for all STE writes.
+        */
+       dep_wqe = mlx5hws_send_add_new_dep_wqe(queue);
+       hws_rule_init_dep_wqe(dep_wqe, rule, mt, attr);
+
+       ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
+       ste_attr.wqe_data = &dep_wqe->wqe_data;
+       apply.wqe_ctrl = &dep_wqe->wqe_ctrl;
+       apply.wqe_data = (__force __be32 *)&dep_wqe->wqe_data;
+       apply.rule_action = rule_actions;
+       apply.queue = queue;
+
+       if (action_stes) {
+               /* Allocate action STEs for rules that need more than match STE */
+               if (!is_update) {
+                       ret = hws_rule_alloc_action_ste(rule, attr);
+                       if (ret) {
+                               mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
+                               mlx5hws_send_abort_new_dep_wqe(queue);
+                               return ret;
+                       }
+               }
+               /* Skip RX/TX based on the dep_wqe init */
+               ste_attr.rtc_0 = dep_wqe->rtc_0 ?
+                                matcher->action_ste[rule->action_ste_selector].rtc_0_id : 0;
+               ste_attr.rtc_1 = dep_wqe->rtc_1 ?
+                                matcher->action_ste[rule->action_ste_selector].rtc_1_id : 0;
+               /* Action STEs are written to a specific index last to first */
+               ste_attr.direct_index = rule->action_ste_idx + action_stes;
+               apply.next_direct_idx = ste_attr.direct_index;
+       } else {
+               apply.next_direct_idx = 0;
+       }
+
+       for (i = total_stes; i-- > 0;) {
+               mlx5hws_action_apply_setter(&apply, setter--, !i && is_jumbo);
+
+               if (i == 0) {
+                       /* Handle last match STE.
+                        * For hash split / linear lookup RTCs, packets reaching any STE
+                        * will always match and perform the specified actions, which
+                        * makes the tag irrelevant.
+                        */
+                       if (likely(!mlx5hws_matcher_is_insert_by_idx(matcher) && !is_update))
+                               mlx5hws_definer_create_tag(match_param, mt->fc, mt->fc_sz,
+                                                          (u8 *)dep_wqe->wqe_data.action);
+                       else if (is_update)
+                               hws_rule_update_copy_tag(rule, &dep_wqe->wqe_data, is_jumbo);
+
+                       /* Rule has dependent WQEs, match dep_wqe is queued */
+                       if (action_stes || apply.require_dep)
+                               break;
+
+                       /* Rule has no dependencies, abort dep_wqe and send WQE now */
+                       mlx5hws_send_abort_new_dep_wqe(queue);
+                       ste_attr.wqe_tag_is_jumbo = is_jumbo;
+                       ste_attr.send_attr.notify_hw = !attr->burst;
+                       ste_attr.send_attr.user_data = dep_wqe->user_data;
+                       ste_attr.send_attr.rule = dep_wqe->rule;
+                       ste_attr.rtc_0 = dep_wqe->rtc_0;
+                       ste_attr.rtc_1 = dep_wqe->rtc_1;
+                       ste_attr.used_id_rtc_0 = &rule->rtc_0;
+                       ste_attr.used_id_rtc_1 = &rule->rtc_1;
+                       ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
+                       ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
+                       ste_attr.direct_index = dep_wqe->direct_index;
+               } else {
+                       apply.next_direct_idx = --ste_attr.direct_index;
+               }
+
+               mlx5hws_send_ste(queue, &ste_attr);
+       }
+
+       /* Backup TAG on the rule for deletion and resize info for
+        * moving rules to a new matcher, only after insertion.
+        */
+       if (!is_update)
+               hws_rule_save_delete_info(rule, &ste_attr);
+
+       hws_rule_save_resize_info(rule, &ste_attr, is_update);
+       mlx5hws_send_engine_inc_rule(queue);
+
+       if (!attr->burst)
+               mlx5hws_send_all_dep_wqe(queue);
+
+       return 0;
+}
+
+/* Tear down a rule whose HW operation failed (or whose send queue is in an
+ * error state): generate a DELETED completion for the caller, release the
+ * rule's action STEs and delete/resize bookkeeping, and — for non-burst
+ * callers — flush any pending dependent WQEs and force a doorbell so the
+ * queue still drains.
+ */
+static void hws_rule_destroy_failed_hws(struct mlx5hws_rule *rule,
+                                       struct mlx5hws_rule_attr *attr)
+{
+       struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+       struct mlx5hws_send_engine *queue;
+
+       queue = &ctx->send_queue[attr->queue_id];
+
+       /* Report completion to the caller as a software-generated event */
+       hws_rule_gen_comp(queue, rule, false,
+                         attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
+
+       /* Rule failed now we can safely release action STEs */
+       mlx5hws_rule_free_action_ste(rule);
+
+       /* Clear complex tag */
+       hws_rule_clear_delete_info(rule);
+
+       /* Clear info that was saved for resizing */
+       mlx5hws_rule_clear_resize_info(rule);
+
+       /* If a rule that was indicated as burst (need to trigger HW) has failed
+        * insertion we won't ring the HW as nothing is being written to the WQ.
+        * In such case update the last WQE and ring the HW with that work
+        */
+       if (attr->burst)
+               return;
+
+       mlx5hws_send_all_dep_wqe(queue);
+       mlx5hws_send_engine_flush_queue(queue);
+}
+
+/* Enqueue HW deletion of @rule. Special flows are handled first: a queue in
+ * error state or a rule that already failed is torn down locally; a rule
+ * still being created returns -EBUSY; a rule marked skip_delete is completed
+ * without touching HW. Otherwise a DEACTIVATE GTA WQE is posted to the
+ * RTC(s) the rule currently resides in.
+ * Returns 0 on successful enqueue (completion arrives asynchronously),
+ * negative errno otherwise.
+ */
+static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
+                               struct mlx5hws_rule_attr *attr)
+{
+       bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
+       struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+       struct mlx5hws_matcher *matcher = rule->matcher;
+       struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0};
+       struct mlx5hws_send_ste_attr ste_attr = {0};
+       struct mlx5hws_send_engine *queue;
+
+       queue = &ctx->send_queue[attr->queue_id];
+
+       if (unlikely(mlx5hws_send_engine_err(queue))) {
+               hws_rule_destroy_failed_hws(rule, attr);
+               return 0;
+       }
+
+       /* Rule is not completed yet */
+       if (rule->status == MLX5HWS_RULE_STATUS_CREATING)
+               return -EBUSY;
+
+       /* Rule failed and doesn't require cleanup */
+       if (rule->status == MLX5HWS_RULE_STATUS_FAILED) {
+               hws_rule_destroy_failed_hws(rule, attr);
+               return 0;
+       }
+
+       if (rule->skip_delete) {
+               /* Rule shouldn't be deleted in HW.
+                * Generate completion as if write succeeded, and we can
+                * safely release action STEs and clear resize info.
+                */
+               hws_rule_gen_comp(queue, rule, false,
+                                 attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
+
+               mlx5hws_rule_free_action_ste(rule);
+               mlx5hws_rule_clear_resize_info(rule);
+               return 0;
+       }
+
+       mlx5hws_send_engine_inc_rule(queue);
+
+       /* Send dependent WQE */
+       if (!attr->burst)
+               mlx5hws_send_all_dep_wqe(queue);
+
+       rule->status = MLX5HWS_RULE_STATUS_DELETING;
+
+       /* Build a DEACTIVATE GTA WQE targeting the rule's current STE */
+       ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+       ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+       ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+
+       ste_attr.send_attr.rule = rule;
+       ste_attr.send_attr.notify_hw = !attr->burst;
+       ste_attr.send_attr.user_data = attr->user_data;
+
+       ste_attr.rtc_0 = rule->rtc_0;
+       ste_attr.rtc_1 = rule->rtc_1;
+       ste_attr.used_id_rtc_0 = &rule->rtc_0;
+       ste_attr.used_id_rtc_1 = &rule->rtc_1;
+       ste_attr.wqe_ctrl = &wqe_ctrl;
+       ste_attr.wqe_tag_is_jumbo = is_jumbo;
+       ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;
+       if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
+               ste_attr.direct_index = attr->rule_idx;
+
+       hws_rule_load_delete_info(rule, &ste_attr);
+       mlx5hws_send_ste(queue, &ste_attr);
+       hws_rule_clear_delete_info(rule);
+
+       return 0;
+}
+
+/* Common sanity checks before enqueueing any rule operation: user_data must
+ * be set (completions are matched to callers through it) and the target send
+ * queue must have room.
+ */
+static int hws_rule_enqueue_precheck(struct mlx5hws_rule *rule,
+                                    struct mlx5hws_rule_attr *attr)
+{
+       struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+
+       if (unlikely(!attr->user_data))
+               return -EINVAL;
+
+       /* Check if there is room in queue */
+       if (unlikely(mlx5hws_send_engine_full(&ctx->send_queue[attr->queue_id])))
+               return -EBUSY;
+
+       return 0;
+}
+
+/* Precheck for moving a rule between matchers (resize): only rules in
+ * CREATED state may be moved.
+ */
+static int hws_rule_enqueue_precheck_move(struct mlx5hws_rule *rule,
+                                         struct mlx5hws_rule_attr *attr)
+{
+       if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
+               return -EINVAL;
+
+       return hws_rule_enqueue_precheck(rule, attr);
+}
+
+/* Precheck for rule creation: new rules are rejected with -EAGAIN while the
+ * matcher is being resized, on top of the common queue checks.
+ */
+static int hws_rule_enqueue_precheck_create(struct mlx5hws_rule *rule,
+                                           struct mlx5hws_rule_attr *attr)
+{
+       if (unlikely(mlx5hws_matcher_is_in_resize(rule->matcher)))
+               /* Matcher in resize - new rules are not allowed */
+               return -EAGAIN;
+
+       return hws_rule_enqueue_precheck(rule, attr);
+}
+
+/* Precheck for updating a rule's actions: the rule must be locatable in HW
+ * by index (matcher is resizable, rule-idx optimized, or insert-by-index),
+ * and must be in CREATED state (no update in flight).
+ */
+static int hws_rule_enqueue_precheck_update(struct mlx5hws_rule *rule,
+                                           struct mlx5hws_rule_attr *attr)
+{
+       struct mlx5hws_matcher *matcher = rule->matcher;
+
+       if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher) &&
+                    !matcher->attr.optimize_using_rule_idx &&
+                    !mlx5hws_matcher_is_insert_by_idx(matcher))) {
+               return -EOPNOTSUPP;
+       }
+
+       if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
+               return -EBUSY;
+
+       return hws_rule_enqueue_precheck_create(rule, attr);
+}
+
+/* First half of moving a rule during matcher resize: post a DEACTIVATE GTA
+ * WQE removing the rule's STE from the old RTCs saved in resize_info.
+ * @queue_ptr is a struct mlx5hws_send_engine *; completion is matched by the
+ * caller through @user_data. Always returns 0.
+ */
+int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
+                                void *queue_ptr,
+                                void *user_data)
+{
+       bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
+       struct mlx5hws_wqe_gta_ctrl_seg empty_wqe_ctrl = {0};
+       struct mlx5hws_matcher *matcher = rule->matcher;
+       struct mlx5hws_send_engine *queue = queue_ptr;
+       struct mlx5hws_send_ste_attr ste_attr = {0};
+
+       /* Flush dependent WQEs first so the delete is ordered after them */
+       mlx5hws_send_all_dep_wqe(queue);
+
+       rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_DELETING;
+
+       ste_attr.send_attr.fence = 0;
+       ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+       ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+       ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+       ste_attr.send_attr.rule = rule;
+       ste_attr.send_attr.notify_hw = 1;
+       ste_attr.send_attr.user_data = user_data;
+       /* Target the old RTCs the rule is being moved out of */
+       ste_attr.rtc_0 = rule->resize_info->rtc_0;
+       ste_attr.rtc_1 = rule->resize_info->rtc_1;
+       ste_attr.used_id_rtc_0 = &rule->resize_info->rtc_0;
+       ste_attr.used_id_rtc_1 = &rule->resize_info->rtc_1;
+       ste_attr.wqe_ctrl = &empty_wqe_ctrl;
+       ste_attr.wqe_tag_is_jumbo = is_jumbo;
+       ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;
+
+       if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
+               ste_attr.direct_index = rule->resize_info->rule_idx;
+
+       hws_rule_load_delete_info(rule, &ste_attr);
+       mlx5hws_send_ste(queue, &ste_attr);
+
+       return 0;
+}
+
+/* Second half of a rule move during matcher resize: re-activate the rule's
+ * saved STE (ctrl/data segments kept in resize_info) in the destination
+ * matcher's RTCs. Returns 0 on successful enqueue, negative errno otherwise.
+ */
+int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
+                             struct mlx5hws_rule_attr *attr)
+{
+       bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
+       struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+       struct mlx5hws_matcher *matcher = rule->matcher;
+       struct mlx5hws_send_ste_attr ste_attr = {0};
+       struct mlx5hws_send_engine *queue;
+       int ret;
+
+       ret = hws_rule_enqueue_precheck_move(rule, attr);
+       if (unlikely(ret))
+               return ret;
+
+       queue = &ctx->send_queue[attr->queue_id];
+
+       ret = mlx5hws_send_engine_err(queue);
+       if (ret)
+               return ret;
+
+       hws_rule_move_init(rule, attr);
+       hws_rule_move_get_rtc(rule, &ste_attr);
+
+       ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+       ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+       ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+       ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+       ste_attr.wqe_tag_is_jumbo = is_jumbo;
+
+       ste_attr.send_attr.rule = rule;
+       ste_attr.send_attr.fence = 0;
+       ste_attr.send_attr.notify_hw = !attr->burst;
+       ste_attr.send_attr.user_data = attr->user_data;
+
+       ste_attr.used_id_rtc_0 = &rule->rtc_0;
+       ste_attr.used_id_rtc_1 = &rule->rtc_1;
+       /* Replay the WQE segments that were snapshotted at creation time */
+       ste_attr.wqe_ctrl = (struct mlx5hws_wqe_gta_ctrl_seg *)rule->resize_info->ctrl_seg;
+       ste_attr.wqe_data = (struct mlx5hws_wqe_gta_data_seg_ste *)rule->resize_info->data_seg;
+       ste_attr.direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
+                               attr->rule_idx : 0;
+
+       mlx5hws_send_ste(queue, &ste_attr);
+       mlx5hws_send_engine_inc_rule(queue);
+
+       if (!attr->burst)
+               mlx5hws_send_all_dep_wqe(queue);
+
+       return 0;
+}
+
+/* Enqueue creation of a new rule on @matcher.
+ * @mt_idx / @at_idx select the match/action templates, @match_param holds
+ * the match values, and @rule_handle receives the rule state. Completion is
+ * reported asynchronously through the queue with @attr->user_data.
+ * Returns 0 on successful enqueue, negative errno otherwise.
+ */
+int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
+                       u8 mt_idx,
+                       u32 *match_param,
+                       u8 at_idx,
+                       struct mlx5hws_rule_action rule_actions[],
+                       struct mlx5hws_rule_attr *attr,
+                       struct mlx5hws_rule *rule_handle)
+{
+       int ret;
+
+       rule_handle->matcher = matcher;
+
+       ret = hws_rule_enqueue_precheck_create(rule_handle, attr);
+       if (unlikely(ret))
+               return ret;
+
+       /* Template indices are zero based, so an index equal to the number
+        * of templates is already out of bounds. The previous form
+        * (!(num_of_mt >= mt_idx)) wrongly accepted mt_idx == num_of_mt.
+        */
+       if (unlikely(mt_idx >= matcher->num_of_mt ||
+                    at_idx >= matcher->num_of_at ||
+                    !match_param)) {
+               pr_warn("HWS: Invalid rule creation parameters (MTs, ATs or match params)\n");
+               return -EINVAL;
+       }
+
+       ret = hws_rule_create_hws(rule_handle,
+                                 attr,
+                                 mt_idx,
+                                 match_param,
+                                 at_idx,
+                                 rule_actions);
+
+       return ret;
+}
+
+/* Enqueue deletion of @rule; completion is delivered asynchronously with
+ * @attr->user_data. Returns 0 on successful enqueue, negative errno on error.
+ */
+int mlx5hws_rule_destroy(struct mlx5hws_rule *rule,
+                        struct mlx5hws_rule_attr *attr)
+{
+       int ret;
+
+       ret = hws_rule_enqueue_precheck(rule, attr);
+       if (unlikely(ret))
+               return ret;
+
+       ret = hws_rule_destroy_hws(rule, attr);
+
+       return ret;
+}
+
+/* Enqueue an in-place update of @rule's actions using action template
+ * @at_idx. The original match is kept: match_param is passed as NULL, which
+ * hws_rule_create_hws() interprets as an update rather than a new insertion.
+ * Returns 0 on successful enqueue, negative errno otherwise.
+ */
+int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
+                              u8 at_idx,
+                              struct mlx5hws_rule_action rule_actions[],
+                              struct mlx5hws_rule_attr *attr)
+{
+       int ret;
+
+       ret = hws_rule_enqueue_precheck_update(rule, attr);
+       if (unlikely(ret))
+               return ret;
+
+       ret = hws_rule_create_hws(rule,
+                                 attr,
+                                 0,
+                                 NULL,
+                                 at_idx,
+                                 rule_actions);
+
+       return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
new file mode 100644 (file)
index 0000000..495cdd1
--- /dev/null
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_RULE_H_
+#define MLX5HWS_RULE_H_
+
+/* Byte sizes of the sections of an STE's WQE data segment */
+enum {
+       MLX5HWS_STE_CTRL_SZ = 20,
+       MLX5HWS_ACTIONS_SZ = 12,
+       MLX5HWS_MATCH_TAG_SZ = 32,
+       MLX5HWS_JUMBO_TAG_SZ = 44,
+};
+
+/* Rule lifecycle states.
+ * NOTE: the ordering matters - completion handling advances the status with
+ * '++' on the good path (CREATING -> CREATED, DELETING -> DELETED).
+ */
+enum mlx5hws_rule_status {
+       MLX5HWS_RULE_STATUS_UNKNOWN,
+       MLX5HWS_RULE_STATUS_CREATING,
+       MLX5HWS_RULE_STATUS_CREATED,
+       MLX5HWS_RULE_STATUS_DELETING,
+       MLX5HWS_RULE_STATUS_DELETED,
+       MLX5HWS_RULE_STATUS_FAILING,
+       MLX5HWS_RULE_STATUS_FAILED,
+};
+
+/* State of a rule being moved to a resized matcher */
+enum mlx5hws_rule_move_state {
+       MLX5HWS_RULE_RESIZE_STATE_IDLE,
+       MLX5HWS_RULE_RESIZE_STATE_WRITING,
+       MLX5HWS_RULE_RESIZE_STATE_DELETING,
+};
+
+enum mlx5hws_rule_jumbo_match_tag_offset {
+       MLX5HWS_RULE_JUMBO_MATCH_TAG_OFFSET_DW0 = 8,
+};
+
+/* Saved match tag of a rule, used to build the delete WQE.
+ * Jumbo tags overlay the actions area; regular tags leave it reserved.
+ */
+struct mlx5hws_rule_match_tag {
+       union {
+               u8 jumbo[MLX5HWS_JUMBO_TAG_SZ];
+               struct {
+                       u8 reserved[MLX5HWS_ACTIONS_SZ];
+                       u8 match[MLX5HWS_MATCH_TAG_SZ];
+               };
+       };
+};
+
+/* Snapshot of a rule's HW state kept while its matcher is being resized,
+ * so the rule can be re-posted in the destination matcher and removed from
+ * the source one.
+ */
+struct mlx5hws_rule_resize_info {
+       struct mlx5hws_pool *action_ste_pool[2];
+       u32 rtc_0;
+       u32 rtc_1;
+       u32 rule_idx;
+       u8 state; /* enum mlx5hws_rule_move_state */
+       u8 max_stes;
+       u8 ctrl_seg[MLX5HWS_WQE_SZ_GTA_CTRL]; /* Ctrl segment of STE: 48 bytes */
+       u8 data_seg[MLX5HWS_WQE_SZ_GTA_DATA]; /* Data segment of STE: 64 bytes */
+};
+
+struct mlx5hws_rule {
+       struct mlx5hws_matcher *matcher;
+       /* tag is used for normal rules; resize_info replaces it while the
+        * rule is being moved between matchers.
+        */
+       union {
+               struct mlx5hws_rule_match_tag tag;
+               struct mlx5hws_rule_resize_info *resize_info;
+       };
+       u32 rtc_0; /* The RTC into which the STE was inserted */
+       u32 rtc_1; /* The RTC into which the STE was inserted */
+       int action_ste_idx; /* STE array index */
+       u8 status; /* enum mlx5hws_rule_status */
+       u8 action_ste_selector; /* For rule update - which action STE is in use */
+       u8 pending_wqes; /* WQEs posted for this rule still awaiting CQEs */
+       bool skip_delete; /* For complex rules - another rule with same tag
+                          * still exists, so don't actually delete this rule.
+                          */
+};
+
+void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule);
+
+int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
+                                void *queue, void *user_data);
+
+int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
+                             struct mlx5hws_rule_attr *attr);
+
+bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule);
+
+void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule);
+
+#endif /* MLX5HWS_RULE_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
new file mode 100644 (file)
index 0000000..424797b
--- /dev/null
@@ -0,0 +1,1231 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+#include "lib/clock.h"
+
+enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };
+
+/* Reserve the next dependent-WQE slot on @queue's SQ and clear its match
+ * tag. The slot is committed by mlx5hws_send_all_dep_wqe() or discarded via
+ * mlx5hws_send_abort_new_dep_wqe().
+ * NOTE(review): head_dep_idx wraps with (num_entries - 1) - assumes
+ * num_entries is a power of two; confirm at queue creation.
+ */
+struct mlx5hws_send_ring_dep_wqe *
+mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue)
+{
+       struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
+       unsigned int idx = send_sq->head_dep_idx++ & (queue->num_entries - 1);
+
+       memset(&send_sq->dep_wqe[idx].wqe_data.tag, 0, MLX5HWS_MATCH_TAG_SZ);
+
+       return &send_sq->dep_wqe[idx];
+}
+
+/* Discard the most recently reserved dependent-WQE slot (undo of
+ * mlx5hws_send_add_new_dep_wqe()).
+ */
+void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue)
+{
+       queue->send_ring.send_sq.head_dep_idx--;
+}
+
+/* Flush all queued dependent WQEs to HW. The first WQE carries a small
+ * fence so it is ordered after previously posted work; only the last one
+ * notifies HW.
+ */
+void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue)
+{
+       struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
+       struct mlx5hws_send_ste_attr ste_attr = {0};
+       struct mlx5hws_send_ring_dep_wqe *dep_wqe;
+
+       ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+       ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+       ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+       ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+
+       /* Fence first from previous depend WQEs  */
+       ste_attr.send_attr.fence = 1;
+
+       while (send_sq->head_dep_idx != send_sq->tail_dep_idx) {
+               dep_wqe = &send_sq->dep_wqe[send_sq->tail_dep_idx++ & (queue->num_entries - 1)];
+
+               /* Notify HW on the last WQE */
+               ste_attr.send_attr.notify_hw = (send_sq->tail_dep_idx == send_sq->head_dep_idx);
+               ste_attr.send_attr.user_data = dep_wqe->user_data;
+               ste_attr.send_attr.rule = dep_wqe->rule;
+
+               ste_attr.rtc_0 = dep_wqe->rtc_0;
+               ste_attr.rtc_1 = dep_wqe->rtc_1;
+               ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
+               ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
+               ste_attr.used_id_rtc_0 = &dep_wqe->rule->rtc_0;
+               ste_attr.used_id_rtc_1 = &dep_wqe->rule->rtc_1;
+               ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
+               ste_attr.wqe_data = &dep_wqe->wqe_data;
+               ste_attr.direct_index = dep_wqe->direct_index;
+
+               mlx5hws_send_ste(queue, &ste_attr);
+
+               /* Fencing is done only on the first WQE */
+               ste_attr.send_attr.fence = 0;
+       }
+}
+
+/* Begin a multi-WQEBB post on @queue: returns a ctrl handle that tracks the
+ * target ring and the number of WQEBBs reserved so far (zero initially).
+ */
+struct mlx5hws_send_engine_post_ctrl
+mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue)
+{
+       struct mlx5hws_send_engine_post_ctrl ctrl;
+
+       ctrl.queue = queue;
+       /* Currently only one send ring is supported */
+       ctrl.send_ring = &queue->send_ring;
+       ctrl.num_wqebbs = 0;
+
+       return ctrl;
+}
+
+/* Reserve the next WQEBB of the in-progress post and return its buffer in
+ * *@buf / *@len. For the first WQEBB the control segment space is skipped -
+ * it is filled later by mlx5hws_send_engine_post_end().
+ */
+void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
+                                     char **buf, size_t *len)
+{
+       struct mlx5hws_send_ring_sq *send_sq = &ctrl->send_ring->send_sq;
+       unsigned int idx;
+
+       idx = (send_sq->cur_post + ctrl->num_wqebbs) & send_sq->buf_mask;
+
+       /* Note that *buf is a single MLX5_SEND_WQE_BB. It cannot be used
+        * as buffer of more than one WQE_BB, since the two MLX5_SEND_WQE_BB
+        * can be on 2 different kernel memory pages.
+        */
+       *buf = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+       *len = MLX5_SEND_WQE_BB;
+
+       if (!ctrl->num_wqebbs) {
+               *buf += sizeof(struct mlx5hws_wqe_ctrl_seg);
+               *len -= sizeof(struct mlx5hws_wqe_ctrl_seg);
+       }
+
+       ctrl->num_wqebbs++;
+}
+
+/* Ring the HW doorbell for @sq: update the doorbell record with the current
+ * producer index, then write the WQE control segment to the UAR page.
+ * Barriers order the WQE write, the doorbell-record update and the UAR
+ * write as the device requires.
+ */
+static void hws_send_engine_post_ring(struct mlx5hws_send_ring_sq *sq,
+                                     struct mlx5hws_wqe_ctrl_seg *doorbell_cseg)
+{
+       /* ensure wqe is visible to device before updating doorbell record */
+       dma_wmb();
+
+       *sq->wq.db = cpu_to_be32(sq->cur_post);
+
+       /* ensure doorbell record is visible to device before ringing the
+        * doorbell
+        */
+       wmb();
+
+       mlx5_write64((__be32 *)doorbell_cseg, sq->uar_map);
+
+       /* The previous WRITE_ONCE(doorbell_cseg, NULL) stored NULL into the
+        * local parameter only - a dead store with no ordering or visibility
+        * effect - so it has been removed. mlx5_write64() already issues the
+        * doorbell write.
+        */
+}
+
+/* Copy a rule's match tag into a GTA data segment, first clearing the
+ * control (and, for non-jumbo tags, action) area which may contain stale
+ * data from a previous WQE.
+ */
+static void
+hws_send_wqe_set_tag(struct mlx5hws_wqe_gta_data_seg_ste *wqe_data,
+                    struct mlx5hws_rule_match_tag *tag,
+                    bool is_jumbo)
+{
+       if (is_jumbo) {
+               /* Clear previous possibly dirty control */
+               memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ);
+               memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ);
+       } else {
+               /* Clear previous possibly dirty control and actions */
+               memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ + MLX5HWS_ACTIONS_SZ);
+               memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ);
+       }
+}
+
+/* Finalize a post started with mlx5hws_send_engine_post_start(): fill the
+ * WQE control segment from @attr, record per-WQE private data used at
+ * completion time, advance the producer index, and ring the doorbell if
+ * @attr->notify_hw is set.
+ */
+void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
+                                 struct mlx5hws_send_engine_post_attr *attr)
+{
+       struct mlx5hws_wqe_ctrl_seg *wqe_ctrl;
+       struct mlx5hws_send_ring_sq *sq;
+       unsigned int idx;
+       u32 flags = 0;
+
+       sq = &ctrl->send_ring->send_sq;
+       idx = sq->cur_post & sq->buf_mask;
+       sq->last_idx = idx;
+
+       wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, idx);
+
+       wqe_ctrl->opmod_idx_opcode =
+               cpu_to_be32((attr->opmod << 24) |
+                           ((sq->cur_post & 0xffff) << 8) |
+                           attr->opcode);
+       /* ds count = payload + ctrl segment, in 16-byte units */
+       wqe_ctrl->qpn_ds =
+               cpu_to_be32((attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16 |
+                                sq->sqn << 8);
+       wqe_ctrl->imm = cpu_to_be32(attr->id);
+
+       flags |= attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
+       flags |= attr->fence ? MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE : 0;
+       wqe_ctrl->flags = cpu_to_be32(flags);
+
+       /* Save completion-matching info alongside the WQE */
+       sq->wr_priv[idx].id = attr->id;
+       sq->wr_priv[idx].retry_id = attr->retry_id;
+
+       sq->wr_priv[idx].rule = attr->rule;
+       sq->wr_priv[idx].user_data = attr->user_data;
+       sq->wr_priv[idx].num_wqebbs = ctrl->num_wqebbs;
+
+       if (attr->rule) {
+               sq->wr_priv[idx].rule->pending_wqes++;
+               sq->wr_priv[idx].used_id = attr->used_id;
+       }
+
+       sq->cur_post += ctrl->num_wqebbs;
+
+       if (attr->notify_hw)
+               hws_send_engine_post_ring(sq, wqe_ctrl);
+}
+
+/* Build and post one GTA WQE (control + data WQEBBs). The data segment is
+ * either copied verbatim from @send_wqe_data or, when that is NULL,
+ * composed from @send_wqe_tag via hws_send_wqe_set_tag().
+ */
+static void hws_send_wqe(struct mlx5hws_send_engine *queue,
+                        struct mlx5hws_send_engine_post_attr *send_attr,
+                        struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl,
+                        void *send_wqe_data,
+                        void *send_wqe_tag,
+                        bool is_jumbo,
+                        u8 gta_opcode,
+                        u32 direct_index)
+{
+       struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
+       struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+       struct mlx5hws_send_engine_post_ctrl ctrl;
+       size_t wqe_len;
+
+       ctrl = mlx5hws_send_engine_post_start(queue);
+       mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+       mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len);
+
+       wqe_ctrl->op_dirix = cpu_to_be32(gta_opcode << 28 | direct_index);
+       memcpy(wqe_ctrl->stc_ix, send_wqe_ctrl->stc_ix,
+              sizeof(send_wqe_ctrl->stc_ix));
+
+       if (send_wqe_data)
+               memcpy(wqe_data, send_wqe_data, sizeof(*wqe_data));
+       else
+               hws_send_wqe_set_tag(wqe_data, send_wqe_tag, is_jumbo);
+
+       mlx5hws_send_engine_post_end(&ctrl, send_attr);
+}
+
+/* Post the STE write described by @ste_attr. When both RTCs are set, two
+ * WQEs are posted: fencing is applied only to the first posted WQE and the
+ * HW notification only to the last. The caller's notify_hw/fence values in
+ * @ste_attr->send_attr are restored before returning.
+ */
+void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
+                     struct mlx5hws_send_ste_attr *ste_attr)
+{
+       struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr;
+       u8 notify_hw = send_attr->notify_hw;
+       u8 fence = send_attr->fence;
+
+       if (ste_attr->rtc_1) {
+               send_attr->id = ste_attr->rtc_1;
+               send_attr->used_id = ste_attr->used_id_rtc_1;
+               send_attr->retry_id = ste_attr->retry_rtc_1;
+               send_attr->fence = fence;
+               send_attr->notify_hw = notify_hw && !ste_attr->rtc_0;
+               hws_send_wqe(queue, send_attr,
+                            ste_attr->wqe_ctrl,
+                            ste_attr->wqe_data,
+                            ste_attr->wqe_tag,
+                            ste_attr->wqe_tag_is_jumbo,
+                            ste_attr->gta_opcode,
+                            ste_attr->direct_index);
+       }
+
+       if (ste_attr->rtc_0) {
+               send_attr->id = ste_attr->rtc_0;
+               send_attr->used_id = ste_attr->used_id_rtc_0;
+               send_attr->retry_id = ste_attr->retry_rtc_0;
+               send_attr->fence = fence && !ste_attr->rtc_1;
+               send_attr->notify_hw = notify_hw;
+               hws_send_wqe(queue, send_attr,
+                            ste_attr->wqe_ctrl,
+                            ste_attr->wqe_data,
+                            ste_attr->wqe_tag,
+                            ste_attr->wqe_tag_is_jumbo,
+                            ste_attr->gta_opcode,
+                            ste_attr->direct_index);
+       }
+
+       /* Restore to original requested values */
+       send_attr->notify_hw = notify_hw;
+       send_attr->fence = fence;
+}
+
+/* Repost a failed STE write to its retry RTC: rebuild the GTA WQE from the
+ * old ctrl/data WQEBBs still present in the SQ buffer at @wqe_cnt and post
+ * it with the retry id.
+ */
+static void hws_send_engine_retry_post_send(struct mlx5hws_send_engine *queue,
+                                           struct mlx5hws_send_ring_priv *priv,
+                                           u16 wqe_cnt)
+{
+       struct mlx5hws_send_engine_post_attr send_attr = {0};
+       struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
+       struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+       struct mlx5hws_send_engine_post_ctrl ctrl;
+       struct mlx5hws_send_ring_sq *send_sq;
+       unsigned int idx;
+       size_t wqe_len;
+       char *p;
+
+       send_attr.rule = priv->rule;
+       send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+       send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+       send_attr.len = MLX5_SEND_WQE_BB * 2 - sizeof(struct mlx5hws_wqe_ctrl_seg);
+       send_attr.notify_hw = 1;
+       send_attr.fence = 0;
+       send_attr.user_data = priv->user_data;
+       send_attr.id = priv->retry_id;
+       send_attr.used_id = priv->used_id;
+
+       ctrl = mlx5hws_send_engine_post_start(queue);
+       mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+       mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len);
+
+       send_sq = &ctrl.send_ring->send_sq;
+       idx = wqe_cnt & send_sq->buf_mask;
+       p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+
+       /* Copy old gta ctrl */
+       memcpy(wqe_ctrl, p + sizeof(struct mlx5hws_wqe_ctrl_seg),
+              MLX5_SEND_WQE_BB - sizeof(struct mlx5hws_wqe_ctrl_seg));
+
+       idx = (wqe_cnt + 1) & send_sq->buf_mask;
+       p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+
+       /* Copy old gta data */
+       memcpy(wqe_data, p, MLX5_SEND_WQE_BB);
+
+       mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+}
+
+/* Force a CQE on the most recently posted WQE and ring the doorbell, so a
+ * queue with outstanding work can be drained even when no WQE requested HW
+ * notification.
+ */
+void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue)
+{
+       struct mlx5hws_send_ring_sq *sq = &queue->send_ring.send_sq;
+       struct mlx5hws_wqe_ctrl_seg *wqe_ctrl;
+
+       wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, sq->last_idx);
+       wqe_ctrl->flags |= cpu_to_be32(MLX5_WQE_CTRL_CQ_UPDATE);
+
+       hws_send_engine_post_ring(sq, wqe_ctrl);
+}
+
+/* Completion handling for a rule that is mid-move between matchers (resize).
+ * WRITING: the write into the new matcher completed (or failed) - if it
+ * failed, swap the RTC ids so the rule points back at the original RTCs for
+ * a future delete, then clean up the partial state in the old matcher.
+ * DELETING: the removal from the old matcher completed - report the final
+ * status, retarget the rule at the destination matcher on success, and
+ * return it to CREATED/IDLE.
+ */
+static void
+hws_send_engine_update_rule_resize(struct mlx5hws_send_engine *queue,
+                                  struct mlx5hws_send_ring_priv *priv,
+                                  enum mlx5hws_flow_op_status *status)
+{
+       switch (priv->rule->resize_info->state) {
+       case MLX5HWS_RULE_RESIZE_STATE_WRITING:
+               if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) {
+                       /* Backup original RTCs */
+                       u32 orig_rtc_0 = priv->rule->resize_info->rtc_0;
+                       u32 orig_rtc_1 = priv->rule->resize_info->rtc_1;
+
+                       /* Delete partially failed move rule using resize_info */
+                       priv->rule->resize_info->rtc_0 = priv->rule->rtc_0;
+                       priv->rule->resize_info->rtc_1 = priv->rule->rtc_1;
+
+                       /* Move rule to original RTC for future delete */
+                       priv->rule->rtc_0 = orig_rtc_0;
+                       priv->rule->rtc_1 = orig_rtc_1;
+               }
+               /* Clean leftovers */
+               mlx5hws_rule_move_hws_remove(priv->rule, queue, priv->user_data);
+               break;
+
+       case MLX5HWS_RULE_RESIZE_STATE_DELETING:
+               if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) {
+                       *status = MLX5HWS_FLOW_OP_ERROR;
+               } else {
+                       *status = MLX5HWS_FLOW_OP_SUCCESS;
+                       priv->rule->matcher = priv->rule->matcher->resize_dst;
+               }
+               priv->rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_IDLE;
+               priv->rule->status = MLX5HWS_RULE_STATUS_CREATED;
+               break;
+
+       default:
+               break;
+       }
+}
+
+/* Per-completion rule state update. A failed WQE with a retry id is
+ * reposted to the retry RTC; otherwise the rule is marked FAILING and the
+ * affected RTC id cleared. When the last outstanding WQE of the rule
+ * completes, the rule either enters the resize flow, settles into FAILED,
+ * or advances along the normal lifecycle.
+ */
+static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue,
+                                       struct mlx5hws_send_ring_priv *priv,
+                                       u16 wqe_cnt,
+                                       enum mlx5hws_flow_op_status *status)
+{
+       priv->rule->pending_wqes--;
+
+       if (*status == MLX5HWS_FLOW_OP_ERROR) {
+               if (priv->retry_id) {
+                       hws_send_engine_retry_post_send(queue, priv, wqe_cnt);
+                       return;
+               }
+               /* Some part of the rule failed */
+               priv->rule->status = MLX5HWS_RULE_STATUS_FAILING;
+               *priv->used_id = 0;
+       } else {
+               *priv->used_id = priv->id;
+       }
+
+       /* Update rule status for the last completion */
+       if (!priv->rule->pending_wqes) {
+               if (unlikely(mlx5hws_rule_move_in_progress(priv->rule))) {
+                       hws_send_engine_update_rule_resize(queue, priv, status);
+                       return;
+               }
+
+               if (unlikely(priv->rule->status == MLX5HWS_RULE_STATUS_FAILING)) {
+                       /* Rule completely failed and doesn't require cleanup */
+                       if (!priv->rule->rtc_0 && !priv->rule->rtc_1)
+                               priv->rule->status = MLX5HWS_RULE_STATUS_FAILED;
+
+                       *status = MLX5HWS_FLOW_OP_ERROR;
+               } else {
+                       /* Increase the status; this only works on the good flow
+                        * because the enum is arranged this way:
+                        * creating -> created -> deleting -> deleted
+                        */
+                       priv->rule->status++;
+                       *status = MLX5HWS_FLOW_OP_SUCCESS;
+                       /* Rule was deleted now we can safely release action STEs
+                        * and clear resize info
+                        */
+                       if (priv->rule->status == MLX5HWS_RULE_STATUS_DELETED) {
+                               mlx5hws_rule_free_action_ste(priv->rule);
+                               mlx5hws_rule_clear_resize_info(priv->rule);
+                       }
+               }
+       }
+}
+
+/* Turn one completed WQE into a flow-op result.
+ * A NULL @cqe means the WQE completed implicitly (it was unsignaled and
+ * a later CQE covered it). Rule WQEs first update the rule state; the
+ * result is written into res[] when there is room, otherwise it is
+ * queued on the engine's completed ring for a later poll.
+ */
+static void hws_send_engine_update(struct mlx5hws_send_engine *queue,
+                                  struct mlx5_cqe64 *cqe,
+                                  struct mlx5hws_send_ring_priv *priv,
+                                  struct mlx5hws_flow_op_result res[],
+                                  s64 *i,
+                                  u32 res_nb,
+                                  u16 wqe_cnt)
+{
+       enum mlx5hws_flow_op_status status;
+
+       /* Success requires a REQ CQE with the top bit of byte_cnt clear;
+        * that bit presumably flags a failed GTA operation - confirm
+        * against the device spec.
+        */
+       if (!cqe || (likely(be32_to_cpu(cqe->byte_cnt) >> 31 == 0) &&
+                    likely(get_cqe_opcode(cqe) == MLX5_CQE_REQ))) {
+               status = MLX5HWS_FLOW_OP_SUCCESS;
+       } else {
+               status = MLX5HWS_FLOW_OP_ERROR;
+       }
+
+       if (priv->user_data) {
+               if (priv->rule) {
+                       hws_send_engine_update_rule(queue, priv, wqe_cnt, &status);
+                       /* Completion is provided on the last rule WQE */
+                       if (priv->rule->pending_wqes)
+                               return;
+               }
+
+               if (*i < res_nb) {
+                       res[*i].user_data = priv->user_data;
+                       res[*i].status = status;
+                       (*i)++;
+                       mlx5hws_send_engine_dec_rule(queue);
+               } else {
+                       /* No room in res[]: park the result on the completed ring */
+                       mlx5hws_send_engine_gen_comp(queue, priv->user_data, status);
+               }
+       }
+}
+
+/* Validate a polled CQE; dump diagnostics and return CQ_POLL_ERR for
+ * anything other than a requester (REQ) completion, CQ_OK otherwise.
+ * NOTE(review): static helper still carries the mlx5hws_ prefix while
+ * the other statics in this file use the bare hws_ prefix.
+ */
+static int mlx5hws_parse_cqe(struct mlx5hws_send_ring_cq *cq,
+                            struct mlx5_cqe64 *cqe64)
+{
+       if (unlikely(get_cqe_opcode(cqe64) != MLX5_CQE_REQ)) {
+               struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe64;
+
+               mlx5_core_err(cq->mdev, "Bad OP in HWS SQ CQE: 0x%x\n", get_cqe_opcode(cqe64));
+               mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n", err_cqe->vendor_err_synd);
+               mlx5_core_err(cq->mdev, "syndrome=%x\n", err_cqe->syndrome);
+               print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
+                              16, 1, err_cqe,
+                              sizeof(*err_cqe), false);
+               return CQ_POLL_ERR;
+       }
+
+       return CQ_OK;
+}
+
+/* Pop a single CQE from the CQ, if one is available.
+ * Returns CQ_OK, CQ_EMPTY, or CQ_POLL_ERR (bad CQE, or polling while
+ * the device is in internal-error state).
+ */
+static int mlx5hws_cq_poll_one(struct mlx5hws_send_ring_cq *cq)
+{
+       struct mlx5_cqe64 *cqe64;
+       int err;
+
+       cqe64 = mlx5_cqwq_get_cqe(&cq->wq);
+       if (!cqe64) {
+               if (unlikely(cq->mdev->state ==
+                            MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
+                       mlx5_core_dbg_once(cq->mdev,
+                                          "Polling CQ while device is shutting down\n");
+                       return CQ_POLL_ERR;
+               }
+               return CQ_EMPTY;
+       }
+
+       mlx5_cqwq_pop(&cq->wq);
+       err = mlx5hws_parse_cqe(cq, cqe64);
+       /* Let HW reuse the CQE slot we just consumed */
+       mlx5_cqwq_update_db_record(&cq->wq);
+
+       return err;
+}
+
+/* Drain one CQE's worth of completions from the send CQ.
+ * A single CQE may cover several WQEs (unsignaled ones complete
+ * implicitly): every WQE between cq->poll_wqe and the CQE's
+ * wqe_counter is finalized with a NULL cqe, then the signaled WQE
+ * itself is processed and the CQE is consumed.
+ */
+static void hws_send_engine_poll_cq(struct mlx5hws_send_engine *queue,
+                                   struct mlx5hws_flow_op_result res[],
+                                   s64 *polled,
+                                   u32 res_nb)
+{
+       struct mlx5hws_send_ring *send_ring = &queue->send_ring;
+       struct mlx5hws_send_ring_cq *cq = &send_ring->send_cq;
+       struct mlx5hws_send_ring_sq *sq = &send_ring->send_sq;
+       struct mlx5hws_send_ring_priv *priv;
+       struct mlx5_cqe64 *cqe;
+       u8 cqe_opcode;
+       u16 wqe_cnt;
+
+       cqe = mlx5_cqwq_get_cqe(&cq->wq);
+       if (!cqe)
+               return;
+
+       cqe_opcode = get_cqe_opcode(cqe);
+       if (cqe_opcode == MLX5_CQE_INVALID)
+               return;
+
+       /* Any non-REQ CQE puts the whole engine into error state */
+       if (unlikely(cqe_opcode != MLX5_CQE_REQ))
+               queue->err = true;
+
+       wqe_cnt = be16_to_cpu(cqe->wqe_counter) & sq->buf_mask;
+
+       /* Finalize WQEs that completed implicitly before the signaled one */
+       while (cq->poll_wqe != wqe_cnt) {
+               priv = &sq->wr_priv[cq->poll_wqe];
+               hws_send_engine_update(queue, NULL, priv, res, polled, res_nb, 0);
+               cq->poll_wqe = (cq->poll_wqe + priv->num_wqebbs) & sq->buf_mask;
+       }
+
+       priv = &sq->wr_priv[wqe_cnt];
+       cq->poll_wqe = (wqe_cnt + priv->num_wqebbs) & sq->buf_mask;
+       hws_send_engine_update(queue, cqe, priv, res, polled, res_nb, wqe_cnt);
+       mlx5hws_cq_poll_one(cq);
+}
+
+/* Flush results that were parked on the engine's completed ring into
+ * res[], stopping when res_nb entries have been written or the ring
+ * is empty.
+ */
+static void hws_send_engine_poll_list(struct mlx5hws_send_engine *queue,
+                                     struct mlx5hws_flow_op_result res[],
+                                     s64 *polled,
+                                     u32 res_nb)
+{
+       struct mlx5hws_completed_poll *comp = &queue->completed;
+
+       while (comp->ci != comp->pi) {
+               if (*polled < res_nb) {
+                       res[*polled].status =
+                               comp->entries[comp->ci].status;
+                       res[*polled].user_data =
+                               comp->entries[comp->ci].user_data;
+                       (*polled)++;
+                       comp->ci = (comp->ci + 1) & comp->mask;
+                       mlx5hws_send_engine_dec_rule(queue);
+               } else {
+                       return;
+               }
+       }
+}
+
+/* Poll the engine: first previously parked completions, then the CQ.
+ * Returns the number of results written to res[]. polled is s64 and
+ * never negative, so the comparison against the u32 res_nb is safe.
+ */
+static int hws_send_engine_poll(struct mlx5hws_send_engine *queue,
+                               struct mlx5hws_flow_op_result res[],
+                               u32 res_nb)
+{
+       s64 polled = 0;
+
+       hws_send_engine_poll_list(queue, res, &polled, res_nb);
+
+       if (polled >= res_nb)
+               return polled;
+
+       hws_send_engine_poll_cq(queue, res, &polled, res_nb);
+
+       return polled;
+}
+
+/* Poll a send queue for up to @res_nb flow-op results.
+ * @queue_id is not range-checked here; callers must pass a valid id.
+ */
+int mlx5hws_send_queue_poll(struct mlx5hws_context *ctx,
+                           u16 queue_id,
+                           struct mlx5hws_flow_op_result res[],
+                           u32 res_nb)
+{
+       return hws_send_engine_poll(&ctx->send_queue[queue_id], res, res_nb);
+}
+
+/* Allocate the SQ work-queue buffer and the driver-side bookkeeping
+ * arrays (dependent-WQE staging buffer and per-WQE completion info).
+ * Interface and error semantics are unchanged; wr_priv now uses
+ * kcalloc() so the count * size multiplication is overflow-checked
+ * (the array is still zeroed, as with kzalloc before).
+ */
+static int hws_send_ring_alloc_sq(struct mlx5_core_dev *mdev,
+                                 int numa_node,
+                                 struct mlx5hws_send_engine *queue,
+                                 struct mlx5hws_send_ring_sq *sq,
+                                 void *sqc_data)
+{
+       void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+       struct mlx5_wq_cyc *wq = &sq->wq;
+       struct mlx5_wq_param param;
+       size_t buf_sz;
+       int err;
+
+       sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+       sq->mdev = mdev;
+
+       param.db_numa_node = numa_node;
+       param.buf_numa_node = numa_node;
+       err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
+       if (err)
+               return err;
+       wq->db = &wq->db[MLX5_SND_DBR];
+
+       /* Each queue entry may expand to up to MAX_WQES_PER_RULE WQEs */
+       buf_sz = queue->num_entries * MAX_WQES_PER_RULE;
+       sq->dep_wqe = kcalloc(queue->num_entries, sizeof(*sq->dep_wqe), GFP_KERNEL);
+       if (!sq->dep_wqe) {
+               err = -ENOMEM;
+               goto destroy_wq_cyc;
+       }
+
+       sq->wr_priv = kcalloc(buf_sz, sizeof(*sq->wr_priv), GFP_KERNEL);
+       if (!sq->wr_priv) {
+               err = -ENOMEM;
+               goto free_dep_wqe;
+       }
+
+       /* buf_sz is a power of two (num_entries is rounded up and
+        * MAX_WQES_PER_RULE is 32), so buf_sz - 1 is a valid index mask.
+        */
+       sq->buf_mask = buf_sz - 1;
+
+       return 0;
+
+free_dep_wqe:
+       kfree(sq->dep_wqe);
+destroy_wq_cyc:
+       mlx5_wq_destroy(&sq->wq_ctrl);
+       return err;
+}
+
+/* Free SQ bookkeeping arrays and the WQ buffer; NULL-safe. */
+static void hws_send_ring_free_sq(struct mlx5hws_send_ring_sq *sq)
+{
+       if (!sq)
+               return;
+       kfree(sq->wr_priv);
+       kfree(sq->dep_wqe);
+       mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+/* Create the SQ object in FW from the prepared sqc parameters.
+ * The SQ is created in RST state and bound to @cq for completions.
+ * NOTE(review): @pdn and @queue are currently unused here - the PD is
+ * programmed into the wq context by hws_send_ring_open_sq().
+ */
+static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn,
+                                  void *sqc_data,
+                                  struct mlx5hws_send_engine *queue,
+                                  struct mlx5hws_send_ring_sq *sq,
+                                  struct mlx5hws_send_ring_cq *cq)
+{
+       void *in, *sqc, *wq;
+       int inlen, err;
+       u8 ts_format;
+
+       inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+               sizeof(u64) * sq->wq_ctrl.buf.npages;
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+       wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+       memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
+       MLX5_SET(sqc, sqc, cqn, cq->mcq.cqn);
+
+       MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+       MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+       /* Pick the timestamp format supported by this device */
+       ts_format = mlx5_is_real_time_sq(mdev) ? MLX5_TIMESTAMP_FORMAT_REAL_TIME :
+                                                MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+       MLX5_SET(sqc, sqc, ts_format, ts_format);
+
+       MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+       MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+       MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+       MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+       mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
+                                 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+       err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+       kvfree(in);
+
+       return err;
+}
+
+/* Destroy the FW SQ object (counterpart of hws_send_ring_create_sq). */
+static void hws_send_ring_destroy_sq(struct mlx5_core_dev *mdev,
+                                    struct mlx5hws_send_ring_sq *sq)
+{
+       mlx5_core_destroy_sq(mdev, sq->sqn);
+}
+
+/* Transition an SQ from RST to RDY state via MODIFY_SQ. */
+static int hws_send_ring_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
+{
+       void *in, *sqc;
+       int inlen, err;
+
+       inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       /* sq_state holds the current state; sqc.state the target one */
+       MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+       sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+       MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+       err = mlx5_core_modify_sq(mdev, sqn, in);
+
+       kvfree(in);
+
+       return err;
+}
+
+/* Full SQ teardown: FW object, WQ buffer, and bookkeeping arrays. */
+static void hws_send_ring_close_sq(struct mlx5hws_send_ring_sq *sq)
+{
+       mlx5_core_destroy_sq(sq->mdev, sq->sqn);
+       mlx5_wq_destroy(&sq->wq_ctrl);
+       kfree(sq->wr_priv);
+       kfree(sq->dep_wqe);
+}
+
+/* Create the SQ and move it to RDY; destroys the newly created SQ if
+ * the RST->RDY transition fails.
+ */
+static int hws_send_ring_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
+                                      void *sqc_data,
+                                      struct mlx5hws_send_engine *queue,
+                                      struct mlx5hws_send_ring_sq *sq,
+                                      struct mlx5hws_send_ring_cq *cq)
+{
+       int err;
+
+       err = hws_send_ring_create_sq(mdev, pdn, sqc_data, queue, sq, cq);
+       if (err)
+               return err;
+
+       err = hws_send_ring_set_sq_rdy(mdev, sq->sqn);
+       if (err)
+               hws_send_ring_destroy_sq(mdev, sq);
+
+       return err;
+}
+
+/* Build the sqc parameters, allocate SQ resources, and create the SQ
+ * in RDY state. buf_sz is already a power of two (num_entries is
+ * rounded up and MAX_WQES_PER_RULE is 32), so roundup_pow_of_two()
+ * below is effectively a no-op.
+ */
+static int hws_send_ring_open_sq(struct mlx5hws_context *ctx,
+                                int numa_node,
+                                struct mlx5hws_send_engine *queue,
+                                struct mlx5hws_send_ring_sq *sq,
+                                struct mlx5hws_send_ring_cq *cq)
+{
+       size_t buf_sz, sq_log_buf_sz;
+       void *sqc_data, *wq;
+       int err;
+
+       sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
+       if (!sqc_data)
+               return -ENOMEM;
+
+       buf_sz = queue->num_entries * MAX_WQES_PER_RULE;
+       sq_log_buf_sz = ilog2(roundup_pow_of_two(buf_sz));
+
+       wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+       MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+       MLX5_SET(wq, wq, pd, ctx->pd_num);
+       MLX5_SET(wq, wq, log_wq_sz, sq_log_buf_sz);
+
+       err = hws_send_ring_alloc_sq(ctx->mdev, numa_node, queue, sq, sqc_data);
+       if (err)
+               goto err_free_sqc;
+
+       err = hws_send_ring_create_sq_rdy(ctx->mdev, ctx->pd_num, sqc_data,
+                                         queue, sq, cq);
+       if (err)
+               goto err_free_sq;
+
+       kvfree(sqc_data);
+
+       return 0;
+err_free_sq:
+       hws_send_ring_free_sq(sq);
+err_free_sqc:
+       kvfree(sqc_data);
+       return err;
+}
+
+/* CQ completion callback. The CQ is polled, so this only logs the
+ * (unexpected) event.
+ */
+static void hws_cq_complete(struct mlx5_core_cq *mcq,
+                           struct mlx5_eqe *eqe)
+{
+       pr_err("CQ completion CQ: #%u\n", mcq->cqn);
+}
+
+/* Allocate the CQ buffer and doorbell and initialize every CQE to an
+ * invalid state (0xf1: INVALID opcode nibble plus ownership bit -
+ * presumably so HW owns all slots initially; confirm against the CQE
+ * layout).
+ */
+static int hws_send_ring_alloc_cq(struct mlx5_core_dev *mdev,
+                                 int numa_node,
+                                 struct mlx5hws_send_engine *queue,
+                                 void *cqc_data,
+                                 struct mlx5hws_send_ring_cq *cq)
+{
+       struct mlx5_core_cq *mcq = &cq->mcq;
+       struct mlx5_wq_param param;
+       struct mlx5_cqe64 *cqe;
+       int err;
+       u32 i;
+
+       param.buf_numa_node = numa_node;
+       param.db_numa_node = numa_node;
+
+       err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
+       if (err)
+               return err;
+
+       mcq->cqe_sz = 64;
+       mcq->set_ci_db = cq->wq_ctrl.db.db;
+       mcq->arm_db = cq->wq_ctrl.db.db + 1;
+       mcq->comp = hws_cq_complete;
+
+       for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+               cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+               cqe->op_own = 0xf1;
+       }
+
+       cq->mdev = mdev;
+
+       return 0;
+}
+
+/* Create the FW CQ object over the allocated buffer, attached to the
+ * completion EQ of vector 0.
+ */
+static int hws_send_ring_create_cq(struct mlx5_core_dev *mdev,
+                                  struct mlx5hws_send_engine *queue,
+                                  void *cqc_data,
+                                  struct mlx5hws_send_ring_cq *cq)
+{
+       u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+       struct mlx5_core_cq *mcq = &cq->mcq;
+       void *in, *cqc;
+       int inlen, eqn;
+       int err;
+
+       err = mlx5_comp_eqn_get(mdev, 0, &eqn);
+       if (err)
+               return err;
+
+       inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+               sizeof(u64) * cq->wq_ctrl.buf.npages;
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+       memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
+       mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+                                 (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+       MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+       MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+       MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+       MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+       err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
+
+       kvfree(in);
+
+       return err;
+}
+
+/* Prepare cqc parameters, allocate the CQ and create its FW object.
+ * The cqe_sz field is left at its kvzalloc()ed value of 0, which
+ * encodes the 64-byte CQE stride matching mcq->cqe_sz = 64 set in
+ * hws_send_ring_alloc_cq(). The previous code wrote queue->num_entries
+ * into this narrow encoded field, which is not a CQE-size value and
+ * could select a wrong stride for small queue sizes.
+ */
+static int hws_send_ring_open_cq(struct mlx5_core_dev *mdev,
+                                struct mlx5hws_send_engine *queue,
+                                int numa_node,
+                                struct mlx5hws_send_ring_cq *cq)
+{
+       void *cqc_data;
+       int err;
+
+       cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
+       if (!cqc_data)
+               return -ENOMEM;
+
+       MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
+       MLX5_SET(cqc, cqc_data, log_cq_size, ilog2(queue->num_entries));
+
+       err = hws_send_ring_alloc_cq(mdev, numa_node, queue, cqc_data, cq);
+       if (err)
+               goto err_out;
+
+       err = hws_send_ring_create_cq(mdev, queue, cqc_data, cq);
+       if (err)
+               goto err_free_cq;
+
+       kvfree(cqc_data);
+
+       return 0;
+
+err_free_cq:
+       mlx5_wq_destroy(&cq->wq_ctrl);
+err_out:
+       kvfree(cqc_data);
+       return err;
+}
+
+/* Destroy the FW CQ object and free its WQ buffer. */
+static void hws_send_ring_close_cq(struct mlx5hws_send_ring_cq *cq)
+{
+       mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
+       mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+/* Close both rings of a send engine: the SQ first, then its CQ. */
+static void hws_send_ring_close(struct mlx5hws_send_engine *queue)
+{
+       hws_send_ring_close_sq(&queue->send_ring.send_sq);
+       hws_send_ring_close_cq(&queue->send_ring.send_cq);
+}
+
+/* Open a send engine's rings on the device's NUMA node: the CQ first,
+ * then the SQ bound to it; the CQ is closed again if the SQ fails.
+ * NOTE(review): static function with an mlx5hws_ prefix, unlike the
+ * hws_ prefix used by other statics in this file.
+ */
+static int mlx5hws_send_ring_open(struct mlx5hws_context *ctx,
+                                 struct mlx5hws_send_engine *queue)
+{
+       int numa_node = dev_to_node(mlx5_core_dma_dev(ctx->mdev));
+       struct mlx5hws_send_ring *ring = &queue->send_ring;
+       int err;
+
+       err = hws_send_ring_open_cq(ctx->mdev, queue, numa_node, &ring->send_cq);
+       if (err)
+               return err;
+
+       err = hws_send_ring_open_sq(ctx, numa_node, queue, &ring->send_sq,
+                                   &ring->send_cq);
+       if (err)
+               goto close_cq;
+
+       return err;
+
+close_cq:
+       hws_send_ring_close_cq(&ring->send_cq);
+       return err;
+}
+
+/* Close a queue's rings and free its completed-results ring. */
+void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
+{
+       hws_send_ring_close(queue);
+       kfree(queue->completed.entries);
+}
+
+/* Initialize one send engine: round the queue size up to a power of
+ * two (required for the index masks), allocate the completed-results
+ * ring, and open the SQ/CQ pair.
+ */
+int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
+                           struct mlx5hws_send_engine *queue,
+                           u16 queue_size)
+{
+       int err;
+
+       mutex_init(&queue->lock);
+
+       queue->num_entries = roundup_pow_of_two(queue_size);
+       queue->used_entries = 0;
+
+       queue->completed.entries = kcalloc(queue->num_entries,
+                                          sizeof(queue->completed.entries[0]),
+                                          GFP_KERNEL);
+       if (!queue->completed.entries)
+               return -ENOMEM;
+
+       queue->completed.pi = 0;
+       queue->completed.ci = 0;
+       /* num_entries is a power of two, so this is a valid ring mask */
+       queue->completed.mask = queue->num_entries - 1;
+       err = mlx5hws_send_ring_open(ctx, queue);
+       if (err)
+               goto free_completed_entries;
+
+       return 0;
+
+free_completed_entries:
+       kfree(queue->completed.entries);
+       return err;
+}
+
+/* Close the first @queues send queues (also used for error unwind). */
+static void __hws_send_queues_close(struct mlx5hws_context *ctx, u16 queues)
+{
+       while (queues--)
+               mlx5hws_send_queue_close(&ctx->send_queue[queues]);
+}
+
+/* Destroy the BWC queue mutexes and unregister their lockdep keys;
+ * no-op when the context does not support the BWC API.
+ */
+static void hws_send_queues_bwc_locks_destroy(struct mlx5hws_context *ctx)
+{
+       int bwc_queues = mlx5hws_bwc_queues(ctx);
+       int i;
+
+       if (!mlx5hws_context_bwc_supported(ctx))
+               return;
+
+       for (i = 0; i < bwc_queues; i++) {
+               mutex_destroy(&ctx->bwc_send_queue_locks[i]);
+               lockdep_unregister_key(ctx->bwc_lock_class_keys + i);
+       }
+
+       kfree(ctx->bwc_lock_class_keys);
+       kfree(ctx->bwc_send_queue_locks);
+}
+
+/* Tear down everything created by mlx5hws_send_queues_open(). */
+void mlx5hws_send_queues_close(struct mlx5hws_context *ctx)
+{
+       hws_send_queues_bwc_locks_destroy(ctx);
+       __hws_send_queues_close(ctx, ctx->queues);
+       kfree(ctx->send_queue);
+}
+
+/* Set up per-queue locks and lockdep keys for the BWC (backward
+ * compatible) API and bump ctx->queues accordingly: one BWC queue per
+ * regular HWS queue (the extra control queue is excluded).
+ * On failure the context is restored exactly to its state on entry
+ * (the previous code left ctx->queues inflated and, in one path, a
+ * dangling bwc_send_queue_locks pointer).
+ */
+static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
+{
+       /* Number of BWC queues is equal to number of the usual HWS queues */
+       int bwc_queues = ctx->queues - 1;
+       int i;
+
+       if (!mlx5hws_context_bwc_supported(ctx))
+               return 0;
+
+       ctx->queues += bwc_queues;
+
+       ctx->bwc_send_queue_locks = kcalloc(bwc_queues,
+                                           sizeof(*ctx->bwc_send_queue_locks),
+                                           GFP_KERNEL);
+       if (!ctx->bwc_send_queue_locks)
+               goto err_queues;
+
+       ctx->bwc_lock_class_keys = kcalloc(bwc_queues,
+                                          sizeof(*ctx->bwc_lock_class_keys),
+                                          GFP_KERNEL);
+       if (!ctx->bwc_lock_class_keys)
+               goto err_locks;
+
+       for (i = 0; i < bwc_queues; i++) {
+               mutex_init(&ctx->bwc_send_queue_locks[i]);
+               lockdep_register_key(ctx->bwc_lock_class_keys + i);
+       }
+
+       return 0;
+
+err_locks:
+       kfree(ctx->bwc_send_queue_locks);
+       ctx->bwc_send_queue_locks = NULL;
+err_queues:
+       /* Undo the queue-count bump so a failed init has no side effects */
+       ctx->queues -= bwc_queues;
+       return -ENOMEM;
+}
+
+/* Open all send queues for the context: @queues data-path queues plus
+ * one control-path queue, and (when supported) a parallel set of BWC
+ * queues with their locks. On any failure everything opened so far is
+ * torn down. (Also fixes the space-indented unwind call to use the
+ * file's normal indentation.)
+ */
+int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
+                            u16 queues,
+                            u16 queue_size)
+{
+       int err = 0;
+       u32 i;
+
+       /* Open one extra queue for control path */
+       ctx->queues = queues + 1;
+
+       /* open a separate set of queues and locks for bwc API */
+       err = hws_bwc_send_queues_init(ctx);
+       if (err)
+               return err;
+
+       ctx->send_queue = kcalloc(ctx->queues, sizeof(*ctx->send_queue), GFP_KERNEL);
+       if (!ctx->send_queue) {
+               err = -ENOMEM;
+               goto free_bwc_locks;
+       }
+
+       for (i = 0; i < ctx->queues; i++) {
+               err = mlx5hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size);
+               if (err)
+                       goto close_send_queues;
+       }
+
+       return 0;
+
+close_send_queues:
+       /* Close only the i queues that were opened successfully */
+       __hws_send_queues_close(ctx, i);
+
+       kfree(ctx->send_queue);
+
+free_bwc_locks:
+       hws_send_queues_bwc_locks_destroy(ctx);
+
+       return err;
+}
+
+/* Drain a send queue: push any pending dependent WQEs (or just signal
+ * the last posted WQE when there are none), and for the SYNC action
+ * additionally poll the CQ until the engine is empty.
+ * Returns 0 on success, -EINVAL for an unknown action.
+ */
+int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
+                             u16 queue_id,
+                             u32 actions)
+{
+       struct mlx5hws_send_ring_sq *send_sq;
+       struct mlx5hws_send_engine *queue;
+       bool wait_comp = false;
+       s64 polled = 0;
+
+       queue = &ctx->send_queue[queue_id];
+       send_sq = &queue->send_ring.send_sq;
+
+       switch (actions) {
+       case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC:
+               wait_comp = true;
+               fallthrough;
+       case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_ASYNC:
+               if (send_sq->head_dep_idx != send_sq->tail_dep_idx)
+                       /* Send dependent WQEs to drain the queue */
+                       mlx5hws_send_all_dep_wqe(queue);
+               else
+                       /* Signal on the last posted WQE */
+                       mlx5hws_send_engine_flush_queue(queue);
+
+               /* Poll queue until empty */
+               while (wait_comp && !mlx5hws_send_engine_empty(queue))
+                       hws_send_engine_poll_cq(queue, NULL, &polled, 0);
+
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Write one GTA WQE synchronously through the FW GENERATE_WQE command
+ * (used instead of the SQ when HW fencing is not possible). Builds the
+ * WQE control segment plus the match and optional range data segments,
+ * and retries once with the retry id if the FW-returned CQE indicates
+ * failure. Returns 0 on success, non-zero on failure.
+ * (Fix: the error log message was missing its terminating newline.)
+ */
+static int
+hws_send_wqe_fw(struct mlx5_core_dev *mdev,
+               u32 pd_num,
+               struct mlx5hws_send_engine_post_attr *send_attr,
+               struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl,
+               void *send_wqe_match_data,
+               void *send_wqe_match_tag,
+               void *send_wqe_range_data,
+               void *send_wqe_range_tag,
+               bool is_jumbo,
+               u8 gta_opcode)
+{
+       bool has_range = send_wqe_range_data || send_wqe_range_tag;
+       bool has_match = send_wqe_match_data || send_wqe_match_tag;
+       struct mlx5hws_wqe_gta_data_seg_ste gta_wqe_data0 = {0};
+       struct mlx5hws_wqe_gta_data_seg_ste gta_wqe_data1 = {0};
+       struct mlx5hws_wqe_gta_ctrl_seg gta_wqe_ctrl = {0};
+       struct mlx5hws_cmd_generate_wqe_attr attr = {0};
+       struct mlx5hws_wqe_ctrl_seg wqe_ctrl = {0};
+       struct mlx5_cqe64 cqe;
+       u32 flags = 0;
+       int ret;
+
+       /* Set WQE control */
+       wqe_ctrl.opmod_idx_opcode = cpu_to_be32((send_attr->opmod << 24) | send_attr->opcode);
+       wqe_ctrl.qpn_ds = cpu_to_be32((send_attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16);
+       flags |= send_attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
+       wqe_ctrl.flags = cpu_to_be32(flags);
+       wqe_ctrl.imm = cpu_to_be32(send_attr->id);
+
+       /* Set GTA WQE CTRL */
+       memcpy(gta_wqe_ctrl.stc_ix, send_wqe_ctrl->stc_ix, sizeof(send_wqe_ctrl->stc_ix));
+       gta_wqe_ctrl.op_dirix = cpu_to_be32(gta_opcode << 28);
+
+       /* Set GTA match WQE DATA */
+       if (has_match) {
+               if (send_wqe_match_data)
+                       memcpy(&gta_wqe_data0, send_wqe_match_data, sizeof(gta_wqe_data0));
+               else
+                       hws_send_wqe_set_tag(&gta_wqe_data0, send_wqe_match_tag, is_jumbo);
+
+               gta_wqe_data0.rsvd1_definer = cpu_to_be32(send_attr->match_definer_id << 8);
+               attr.gta_data_0 = (u8 *)&gta_wqe_data0;
+       }
+
+       /* Set GTA range WQE DATA */
+       if (has_range) {
+               if (send_wqe_range_data)
+                       memcpy(&gta_wqe_data1, send_wqe_range_data, sizeof(gta_wqe_data1));
+               else
+                       hws_send_wqe_set_tag(&gta_wqe_data1, send_wqe_range_tag, false);
+
+               gta_wqe_data1.rsvd1_definer = cpu_to_be32(send_attr->range_definer_id << 8);
+               attr.gta_data_1 = (u8 *)&gta_wqe_data1;
+       }
+
+       attr.pdn = pd_num;
+       attr.wqe_ctrl = (u8 *)&wqe_ctrl;
+       attr.gta_ctrl = (u8 *)&gta_wqe_ctrl;
+
+send_wqe:
+       ret = mlx5hws_cmd_generate_wqe(mdev, &attr, &cqe);
+       if (ret) {
+               mlx5_core_err(mdev, "Failed to write WQE using command\n");
+               return ret;
+       }
+
+       /* FW returned a good CQE: latch the id that is now in use */
+       if ((get_cqe_opcode(&cqe) == MLX5_CQE_REQ) &&
+           (be32_to_cpu(cqe.byte_cnt) >> 31 == 0)) {
+               *send_attr->used_id = send_attr->id;
+               return 0;
+       }
+
+       /* Retry if rule failed */
+       if (send_attr->retry_id) {
+               wqe_ctrl.imm = cpu_to_be32(send_attr->retry_id);
+               send_attr->id = send_attr->retry_id;
+               send_attr->retry_id = 0;
+               goto send_wqe;
+       }
+
+       return -1;
+}
+
+/* Write a rule's STEs through the FW GENERATE_WQE command instead of
+ * the SQ: drain the queue first when a fence was requested (FW writes
+ * cannot be HW-fenced), write to rtc_1 and then rtc_0 as applicable,
+ * advance the rule status on success, and generate a completion for
+ * the caller either way.
+ */
+void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
+                         struct mlx5hws_send_engine *queue,
+                         struct mlx5hws_send_ste_attr *ste_attr)
+{
+       struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr;
+       struct mlx5hws_rule *rule = send_attr->rule;
+       struct mlx5_core_dev *mdev;
+       u16 queue_id;
+       u32 pdn;
+       int ret;
+
+       queue_id = queue - ctx->send_queue;
+       mdev = ctx->mdev;
+       pdn = ctx->pd_num;
+
+       /* Writing through FW can't HW fence, therefore we drain the queue */
+       if (send_attr->fence)
+               mlx5hws_send_queue_action(ctx,
+                                         queue_id,
+                                         MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+
+       if (ste_attr->rtc_1) {
+               send_attr->id = ste_attr->rtc_1;
+               send_attr->used_id = ste_attr->used_id_rtc_1;
+               send_attr->retry_id = ste_attr->retry_rtc_1;
+               ret = hws_send_wqe_fw(mdev, pdn, send_attr,
+                                     ste_attr->wqe_ctrl,
+                                     ste_attr->wqe_data,
+                                     ste_attr->wqe_tag,
+                                     ste_attr->range_wqe_data,
+                                     ste_attr->range_wqe_tag,
+                                     ste_attr->wqe_tag_is_jumbo,
+                                     ste_attr->gta_opcode);
+               if (ret)
+                       goto fail_rule;
+       }
+
+       if (ste_attr->rtc_0) {
+               send_attr->id = ste_attr->rtc_0;
+               send_attr->used_id = ste_attr->used_id_rtc_0;
+               send_attr->retry_id = ste_attr->retry_rtc_0;
+               ret = hws_send_wqe_fw(mdev, pdn, send_attr,
+                                     ste_attr->wqe_ctrl,
+                                     ste_attr->wqe_data,
+                                     ste_attr->wqe_tag,
+                                     ste_attr->range_wqe_data,
+                                     ste_attr->range_wqe_tag,
+                                     ste_attr->wqe_tag_is_jumbo,
+                                     ste_attr->gta_opcode);
+               if (ret)
+                       goto fail_rule;
+       }
+
+       /* Increase the status, this only works on good flow as the enum
+        * is arrange it away creating -> created -> deleting -> deleted
+        */
+       if (likely(rule))
+               rule->status++;
+
+       mlx5hws_send_engine_gen_comp(queue, send_attr->user_data, MLX5HWS_FLOW_OP_SUCCESS);
+
+       return;
+
+fail_rule:
+       /* FAILED only when no RTC holds parts of the rule (no cleanup needed) */
+       if (likely(rule))
+               rule->status = !rule->rtc_0 && !rule->rtc_1 ?
+                       MLX5HWS_RULE_STATUS_FAILED : MLX5HWS_RULE_STATUS_FAILING;
+
+       mlx5hws_send_engine_gen_comp(queue, send_attr->user_data, MLX5HWS_FLOW_OP_ERROR);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
new file mode 100644 (file)
index 0000000..b50825d
--- /dev/null
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_SEND_H_
+#define MLX5HWS_SEND_H_
+
+/* As a single operation requires at least two WQEBBS.
+ * This means a maximum of 16 such operations per rule.
+ */
+#define MAX_WQES_PER_RULE 32
+
+/* WQE opcode used for hardware steering table access WQEs */
+enum mlx5hws_wqe_opcode {
+       MLX5HWS_WQE_OPCODE_TBL_ACCESS = 0x2c,
+};
+
+/* Opmod selecting what a table-access WQE carries: an STE or a
+ * modify-header argument.
+ */
+enum mlx5hws_wqe_opmod {
+       MLX5HWS_WQE_OPMOD_GTA_STE = 0,
+       MLX5HWS_WQE_OPMOD_GTA_MOD_ARG = 1,
+};
+
+/* GTA operation: write (activate) or delete (deactivate) an entry */
+enum mlx5hws_wqe_gta_opcode {
+       MLX5HWS_WQE_GTA_OP_ACTIVATE = 0,
+       MLX5HWS_WQE_GTA_OP_DEACTIVATE = 1,
+};
+
+enum mlx5hws_wqe_gta_opmod {
+       MLX5HWS_WQE_GTA_OPMOD_STE = 0,
+       MLX5HWS_WQE_GTA_OPMOD_MOD_ARG = 1,
+};
+
+/* Sizes (in bytes) of the GTA WQE control and data segments */
+enum mlx5hws_wqe_gta_sz {
+       MLX5HWS_WQE_SZ_GTA_CTRL = 48,
+       MLX5HWS_WQE_SZ_GTA_DATA = 64,
+};
+
+/* WQE Control segment. */
+struct mlx5hws_wqe_ctrl_seg {
+       __be32 opmod_idx_opcode;
+       __be32 qpn_ds;
+       __be32 flags;
+       __be32 imm;
+};
+
+/* GTA WQE control segment: operation/direct-index word plus STC indices.
+ * Exact field layout is hardware-defined (big-endian words).
+ */
+struct mlx5hws_wqe_gta_ctrl_seg {
+       __be32 op_dirix;
+       __be32 stc_ix[5];
+       __be32 rsvd0[6];
+};
+
+/* GTA WQE data segment carrying an STE. The match portion is either
+ * split into action[3] + tag[8] or viewed as one jumbo[11] tag
+ * (selected by wqe_tag_is_jumbo in struct mlx5hws_send_ste_attr).
+ */
+struct mlx5hws_wqe_gta_data_seg_ste {
+       __be32 rsvd0_ctr_id;
+       __be32 rsvd1_definer;
+       __be32 rsvd2[3];
+       union {
+               struct {
+               __be32 action[3];
+               __be32 tag[8];
+               };
+               __be32 jumbo[11];
+       };
+};
+
+/* GTA WQE data segment carrying modify-header action arguments */
+struct mlx5hws_wqe_gta_data_seg_arg {
+       __be32 action_args[8];
+};
+
+/* A full GTA WQE: control segment followed by an STE or argument segment */
+struct mlx5hws_wqe_gta {
+       struct mlx5hws_wqe_gta_ctrl_seg gta_ctrl;
+       union {
+               struct mlx5hws_wqe_gta_data_seg_ste seg_ste;
+               struct mlx5hws_wqe_gta_data_seg_arg seg_arg;
+       };
+};
+
+/* Completion queue of a send ring */
+struct mlx5hws_send_ring_cq {
+       struct mlx5_core_dev *mdev;
+       struct mlx5_cqwq wq;
+       struct mlx5_wq_ctrl wq_ctrl;
+       struct mlx5_core_cq mcq;
+       u16 poll_wqe;   /* next SQ WQE index expected to complete */
+};
+
+/* Per-posted-WQE bookkeeping, consulted when its completion arrives */
+struct mlx5hws_send_ring_priv {
+       struct mlx5hws_rule *rule;
+       void *user_data;
+       u32 num_wqebbs;
+       u32 id;
+       u32 retry_id;
+       u32 *used_id;
+};
+
+/* A WQE whose posting is deferred (a "dependent" WQE): queued in the SQ's
+ * dep_wqe ring between head_dep_idx/tail_dep_idx and flushed later by
+ * mlx5hws_send_all_dep_wqe().
+ */
+struct mlx5hws_send_ring_dep_wqe {
+       struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl;
+       struct mlx5hws_wqe_gta_data_seg_ste wqe_data;
+       struct mlx5hws_rule *rule;
+       u32 rtc_0;
+       u32 rtc_1;
+       u32 retry_rtc_0;
+       u32 retry_rtc_1;
+       u32 direct_index;
+       void *user_data;
+};
+
+/* Send queue of a send ring */
+struct mlx5hws_send_ring_sq {
+       struct mlx5_core_dev *mdev;
+       u16 cur_post;           /* producer index, masked by buf_mask */
+       u16 buf_mask;
+       struct mlx5hws_send_ring_priv *wr_priv;
+       unsigned int last_idx;
+       struct mlx5hws_send_ring_dep_wqe *dep_wqe;
+       unsigned int head_dep_idx;
+       unsigned int tail_dep_idx;
+       u32 sqn;
+       struct mlx5_wq_cyc wq;
+       struct mlx5_wq_ctrl wq_ctrl;
+       void __iomem *uar_map;
+};
+
+/* A send ring: one SQ paired with one CQ */
+struct mlx5hws_send_ring {
+       struct mlx5hws_send_ring_cq send_cq;
+       struct mlx5hws_send_ring_sq send_sq;
+};
+
+/* Software-generated completion, reported without going through the CQ */
+struct mlx5hws_completed_poll_entry {
+       void *user_data;
+       enum mlx5hws_flow_op_status status;
+};
+
+/* Ring buffer of software-generated completions (see
+ * mlx5hws_send_engine_gen_comp); ci/pi wrap via mask.
+ */
+struct mlx5hws_completed_poll {
+       struct mlx5hws_completed_poll_entry *entries;
+       u16 ci;
+       u16 pi;
+       u16 mask;
+};
+
+/* A send engine (queue): its ring, completion buffer and rule accounting */
+struct mlx5hws_send_engine {
+       struct mlx5hws_send_ring send_ring;
+       struct mlx5_uars_page *uar; /* Uar is shared between rings of a queue */
+       struct mlx5hws_completed_poll completed;
+       u16 used_entries;       /* rules in flight, see inc_rule/dec_rule */
+       u16 num_entries;
+       bool err;               /* set on fatal error; engine unusable */
+       struct mutex lock; /* Protects the send engine */
+};
+
+/* Handle returned by mlx5hws_send_engine_post_start() and consumed by
+ * post_req_wqe()/post_end() while building a multi-WQEBB post.
+ */
+struct mlx5hws_send_engine_post_ctrl {
+       struct mlx5hws_send_engine *queue;
+       struct mlx5hws_send_ring *send_ring;
+       size_t num_wqebbs;
+};
+
+/* Attributes describing a single post (see mlx5hws_send_engine_post_end) */
+struct mlx5hws_send_engine_post_attr {
+       u8 opcode;
+       u8 opmod;
+       u8 notify_hw;
+       u8 fence;
+       u8 match_definer_id;
+       u8 range_definer_id;
+       size_t len;
+       struct mlx5hws_rule *rule;
+       u32 id;
+       u32 retry_id;
+       u32 *used_id;
+       void *user_data;
+};
+
+/* Everything needed to write one STE: target/retry RTCs for both
+ * directions (0/1), the WQE control/data/tag buffers, and optional
+ * range match data.
+ */
+struct mlx5hws_send_ste_attr {
+       u32 rtc_0;
+       u32 rtc_1;
+       u32 retry_rtc_0;
+       u32 retry_rtc_1;
+       u32 *used_id_rtc_0;
+       u32 *used_id_rtc_1;
+       bool wqe_tag_is_jumbo;
+       u8 gta_opcode;
+       u32 direct_index;
+       struct mlx5hws_send_engine_post_attr send_attr;
+       struct mlx5hws_rule_match_tag *wqe_tag;
+       struct mlx5hws_rule_match_tag *range_wqe_tag;
+       struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+       struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
+       struct mlx5hws_wqe_gta_data_seg_ste *range_wqe_data;
+};
+
+/* Dependent-WQE handling: reserve, abort or flush deferred WQEs */
+struct mlx5hws_send_ring_dep_wqe *
+mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue);
+
+/* Queue (send engine) lifetime management */
+void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue);
+
+int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
+                           struct mlx5hws_send_engine *queue,
+                           u16 queue_size);
+
+void mlx5hws_send_queues_close(struct mlx5hws_context *ctx);
+
+int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
+                            u16 queues,
+                            u16 queue_size);
+
+int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
+                             u16 queue_id,
+                             u32 actions);
+
+int mlx5hws_send_test(struct mlx5hws_context *ctx,
+                     u16 queues,
+                     u16 queue_size);
+
+/* Low-level posting: start a post, request WQE buffers, finalize */
+struct mlx5hws_send_engine_post_ctrl
+mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
+                                     char **buf, size_t *len);
+
+void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
+                                 struct mlx5hws_send_engine_post_attr *attr);
+
+/* STE write paths: via the send ring, or via FW commands */
+void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
+                     struct mlx5hws_send_ste_attr *ste_attr);
+
+void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
+                         struct mlx5hws_send_engine *queue,
+                         struct mlx5hws_send_ste_attr *ste_attr);
+
+void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue);
+
+/* True when every posted WQE has been polled: the SQ producer index
+ * (masked) has been caught up by the CQ's poll_wqe cursor.
+ */
+static inline bool mlx5hws_send_engine_empty(struct mlx5hws_send_engine *queue)
+{
+       struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
+       struct mlx5hws_send_ring_cq *send_cq = &queue->send_ring.send_cq;
+
+       return ((send_sq->cur_post & send_sq->buf_mask) == send_cq->poll_wqe);
+}
+
+/* True when the engine has no room for another in-flight rule operation */
+static inline bool mlx5hws_send_engine_full(struct mlx5hws_send_engine *queue)
+{
+       return queue->used_entries >= queue->num_entries;
+}
+
+/* Account one more in-flight rule operation on this engine */
+static inline void mlx5hws_send_engine_inc_rule(struct mlx5hws_send_engine *queue)
+{
+       queue->used_entries++;
+}
+
+/* Account one completed rule operation on this engine */
+static inline void mlx5hws_send_engine_dec_rule(struct mlx5hws_send_engine *queue)
+{
+       queue->used_entries--;
+}
+
+/* Report a software-generated completion (one that did not come from the
+ * CQ) to be picked up by the poll path. The producer index wraps via
+ * mask; presumably callers are bounded by engine capacity so the ring
+ * cannot overrun the consumer -- no fullness check is done here.
+ */
+static inline void mlx5hws_send_engine_gen_comp(struct mlx5hws_send_engine *queue,
+                                               void *user_data,
+                                               int comp_status)
+{
+       struct mlx5hws_completed_poll *comp = &queue->completed;
+
+       comp->entries[comp->pi].status = comp_status;
+       comp->entries[comp->pi].user_data = user_data;
+
+       comp->pi = (comp->pi + 1) & comp->mask;
+}
+
+/* True when the engine hit a fatal error and must not be used for sends */
+static inline bool mlx5hws_send_engine_err(struct mlx5hws_send_engine *queue)
+{
+       return queue->err;
+}
+
+#endif /* MLX5HWS_SEND_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
new file mode 100644 (file)
index 0000000..9576e02
--- /dev/null
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+/* Return the FW flow table ID backing this HWS table */
+u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl)
+{
+       return tbl->ft_id;
+}
+
+/* Fill FT create attributes for this table's anchor flow table.
+ * Level is max_level - 1; the very last level (max_level) is reserved
+ * for the default FDB miss table (see hws_table_up_default_fdb_miss_tbl).
+ */
+static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl,
+                                       struct mlx5hws_cmd_ft_create_attr *ft_attr)
+{
+       ft_attr->type = tbl->fw_ft_type;
+       if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+               ft_attr->level = tbl->ctx->caps->fdb_ft.max_level - 1;
+       else
+               ft_attr->level = tbl->ctx->caps->nic_ft.max_level - 1;
+       ft_attr->rtc_valid = true;
+}
+
+/* Set reformat/decap capability flags on an FT create request when the
+ * device requires them to be fixed at first-FT creation time.
+ */
+static void hws_table_set_cap_attr(struct mlx5hws_table *tbl,
+                                  struct mlx5hws_cmd_ft_create_attr *ft_attr)
+{
+       /* Enabling reformat_en or decap_en for the first flow table
+        * must be done when all VFs are down.
+        * However, HWS doesn't know when it is required to create the first FT.
+        * On the other hand, HWS doesn't use all these FT capabilities at all
+        * (the API doesn't even provide a way to specify these flags), so we'll
+        * just set these caps on all the flow tables.
+        * If HCA_CAP.fdb_dynamic_tunnel is set, this constraint is N/A.
+        */
+       if (!MLX5_CAP_ESW_FLOWTABLE(tbl->ctx->mdev, fdb_dynamic_tunnel)) {
+               ft_attr->reformat_en = true;
+               ft_attr->decap_en = true;
+       }
+}
+
+/* Get (or create on first use) the per-context default FDB miss table:
+ * a last-level FT with a single FTE forwarding to the eswitch manager
+ * vport. Refcounted in ctx->common_res; caller must hold ctx->ctrl_lock.
+ * No-op (returns 0) for non-FDB tables.
+ */
+static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+{
+       struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+       struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
+       struct mlx5hws_cmd_forward_tbl *default_miss;
+       struct mlx5hws_cmd_set_fte_dest dest = {0};
+       struct mlx5hws_context *ctx = tbl->ctx;
+       u8 tbl_type = tbl->type;
+
+       if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
+               return 0;
+
+       /* Already created for this table type - just take a reference */
+       if (ctx->common_res[tbl_type].default_miss) {
+               ctx->common_res[tbl_type].default_miss->refcount++;
+               return 0;
+       }
+
+       ft_attr.type = tbl->fw_ft_type;
+       ft_attr.level = tbl->ctx->caps->fdb_ft.max_level; /* The last level */
+       ft_attr.rtc_valid = false;
+
+       dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+       dest.destination_id = ctx->caps->eswitch_manager_vport_number;
+
+       fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+       fte_attr.dests_num = 1;
+       fte_attr.dests = &dest;
+
+       default_miss = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
+       if (!default_miss) {
+               mlx5hws_err(ctx, "Failed to default miss table type: 0x%x\n", tbl_type);
+               return -EINVAL;
+       }
+
+       /* ctx->ctrl_lock must be held here */
+       ctx->common_res[tbl_type].default_miss = default_miss;
+       ctx->common_res[tbl_type].default_miss->refcount++;
+
+       return 0;
+}
+
+/* Called under ctx->ctrl_lock */
+static void hws_table_down_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+{
+       struct mlx5hws_cmd_forward_tbl *default_miss;
+       struct mlx5hws_context *ctx = tbl->ctx;
+       u8 tbl_type = tbl->type;
+
+       if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
+               return;
+
+       default_miss = ctx->common_res[tbl_type].default_miss;
+       if (--default_miss->refcount)
+               return;
+
+       mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, default_miss);
+       ctx->common_res[tbl_type].default_miss = NULL;
+}
+
+/* Point the miss action of FT @ft_id at the context's default FDB miss
+ * table. Only valid for FDB tables; a non-FDB caller is a driver bug
+ * (warned, then the modify is attempted anyway).
+ */
+static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32 ft_id)
+{
+       struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+       int ret;
+
+       if (unlikely(tbl->type != MLX5HWS_TABLE_TYPE_FDB))
+               pr_warn("HWS: invalid table type %d\n", tbl->type);
+
+       mlx5hws_cmd_set_attr_connect_miss_tbl(tbl->ctx,
+                                             tbl->fw_ft_type,
+                                             tbl->type,
+                                             &ft_attr);
+
+       ret = mlx5hws_cmd_flow_table_modify(tbl->ctx->mdev, &ft_attr, ft_id);
+       if (ret) {
+               mlx5hws_err(tbl->ctx, "Failed to connect FT to default FDB FT\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+/* Create the table's default (anchor) flow table and, for FDB tables,
+ * take a reference on the default miss table and connect to it.
+ * On success *ft_id holds the new FT; on failure everything acquired
+ * here is released via the goto-cleanup chain.
+ */
+int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
+                                   struct mlx5hws_table *tbl,
+                                   u32 *ft_id)
+{
+       struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+       int ret;
+
+       hws_table_init_next_ft_attr(tbl, &ft_attr);
+       hws_table_set_cap_attr(tbl, &ft_attr);
+
+       ret = mlx5hws_cmd_flow_table_create(mdev, &ft_attr, ft_id);
+       if (ret) {
+               mlx5hws_err(tbl->ctx, "Failed creating default ft\n");
+               return ret;
+       }
+
+       if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+               /* Take/create ref over the default miss */
+               ret = hws_table_up_default_fdb_miss_tbl(tbl);
+               if (ret) {
+                       mlx5hws_err(tbl->ctx, "Failed to get default fdb miss\n");
+                       goto free_ft_obj;
+               }
+               ret = hws_table_connect_to_default_miss_tbl(tbl, *ft_id);
+               if (ret) {
+                       mlx5hws_err(tbl->ctx, "Failed connecting to default miss tbl\n");
+                       goto down_miss_tbl;
+               }
+       }
+
+       return 0;
+
+down_miss_tbl:
+       hws_table_down_default_fdb_miss_tbl(tbl);
+free_ft_obj:
+       mlx5hws_cmd_flow_table_destroy(mdev, ft_attr.type, *ft_id);
+       return ret;
+}
+
+/* Destroy the table's default FT and release the default miss table
+ * reference taken by mlx5hws_table_create_default_ft().
+ */
+void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl,
+                                     u32 ft_id)
+{
+       mlx5hws_cmd_flow_table_destroy(tbl->ctx->mdev, tbl->fw_ft_type, ft_id);
+       hws_table_down_default_fdb_miss_tbl(tbl);
+}
+
+/* Fail early when the context was not opened with HWS support */
+static int hws_table_init_check_hws_support(struct mlx5hws_context *ctx,
+                                           struct mlx5hws_table *tbl)
+{
+       if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) {
+               mlx5hws_err(ctx, "HWS not supported, cannot create mlx5hws_table\n");
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+/* Initialize a newly allocated table: resolve the FW FT type, create the
+ * default FT and take the default STC, all under ctx->ctrl_lock.
+ * On failure the default FT is destroyed and the lock released.
+ */
+static int hws_table_init(struct mlx5hws_table *tbl)
+{
+       struct mlx5hws_context *ctx = tbl->ctx;
+       int ret;
+
+       ret = hws_table_init_check_hws_support(ctx, tbl);
+       if (ret)
+               return ret;
+
+       if (mlx5hws_table_get_fw_ft_type(tbl->type, (u8 *)&tbl->fw_ft_type)) {
+               pr_warn("HWS: invalid table type %d\n", tbl->type);
+               return -EOPNOTSUPP;
+       }
+
+       mutex_lock(&ctx->ctrl_lock);
+       ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &tbl->ft_id);
+       if (ret) {
+               mlx5hws_err(tbl->ctx, "Failed to create flow table object\n");
+               mutex_unlock(&ctx->ctrl_lock);
+               return ret;
+       }
+
+       ret = mlx5hws_action_get_default_stc(ctx, tbl->type);
+       if (ret)
+               goto tbl_destroy;
+
+       INIT_LIST_HEAD(&tbl->matchers_list);
+       INIT_LIST_HEAD(&tbl->default_miss.head);
+
+       mutex_unlock(&ctx->ctrl_lock);
+
+       return 0;
+
+tbl_destroy:
+       mlx5hws_table_destroy_default_ft(tbl, tbl->ft_id);
+       mutex_unlock(&ctx->ctrl_lock);
+       return ret;
+}
+
+/* Undo hws_table_init(): put the default STC and destroy the default FT,
+ * under ctx->ctrl_lock.
+ */
+static void hws_table_uninit(struct mlx5hws_table *tbl)
+{
+       mutex_lock(&tbl->ctx->ctrl_lock);
+       mlx5hws_action_put_default_stc(tbl->ctx, tbl->type);
+       mlx5hws_table_destroy_default_ft(tbl, tbl->ft_id);
+       mutex_unlock(&tbl->ctx->ctrl_lock);
+}
+
+/* Allocate and initialize a table and link it into the context's table
+ * list. Returns the new table, or NULL on any failure (invalid type,
+ * allocation failure, or init failure -- errors are logged, not returned).
+ */
+struct mlx5hws_table *mlx5hws_table_create(struct mlx5hws_context *ctx,
+                                          struct mlx5hws_table_attr *attr)
+{
+       struct mlx5hws_table *tbl;
+       int ret;
+
+       if (attr->type > MLX5HWS_TABLE_TYPE_FDB) {
+               mlx5hws_err(ctx, "Invalid table type %d\n", attr->type);
+               return NULL;
+       }
+
+       tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+       if (!tbl)
+               return NULL;
+
+       tbl->ctx = ctx;
+       tbl->type = attr->type;
+       tbl->level = attr->level;
+
+       ret = hws_table_init(tbl);
+       if (ret) {
+               mlx5hws_err(ctx, "Failed to initialise table\n");
+               goto free_tbl;
+       }
+
+       mutex_lock(&ctx->ctrl_lock);
+       list_add(&tbl->tbl_list_node, &ctx->tbl_list);
+       mutex_unlock(&ctx->ctrl_lock);
+
+       return tbl;
+
+free_tbl:
+       kfree(tbl);
+       return NULL;
+}
+
+/* Destroy a table. Fails with -EBUSY while it still holds matchers or
+ * while another table's default miss points at it. Unlinking happens
+ * under ctx->ctrl_lock; the FT/STC teardown is done after dropping it
+ * (hws_table_uninit takes the lock itself).
+ */
+int mlx5hws_table_destroy(struct mlx5hws_table *tbl)
+{
+       struct mlx5hws_context *ctx = tbl->ctx;
+       int ret;
+
+       mutex_lock(&ctx->ctrl_lock);
+       if (!list_empty(&tbl->matchers_list)) {
+               mlx5hws_err(tbl->ctx, "Cannot destroy table containing matchers\n");
+               ret = -EBUSY;
+               goto unlock_err;
+       }
+
+       if (!list_empty(&tbl->default_miss.head)) {
+               mlx5hws_err(tbl->ctx, "Cannot destroy table pointed by default miss\n");
+               ret = -EBUSY;
+               goto unlock_err;
+       }
+
+       list_del_init(&tbl->tbl_list_node);
+       mutex_unlock(&ctx->ctrl_lock);
+
+       hws_table_uninit(tbl);
+       kfree(tbl);
+
+       return 0;
+
+unlock_err:
+       mutex_unlock(&ctx->ctrl_lock);
+       return ret;
+}
+
+/* Return the FT at the tail of the table's chain: the last matcher's
+ * end FT, or the table's own anchor FT when it has no matchers.
+ */
+static u32 hws_table_get_last_ft(struct mlx5hws_table *tbl)
+{
+       struct mlx5hws_matcher *matcher;
+
+       if (list_empty(&tbl->matchers_list))
+               return tbl->ft_id;
+
+       matcher = list_last_entry(&tbl->matchers_list, struct mlx5hws_matcher, list_node);
+       return matcher->end_ft_id;
+}
+
+/* Reset FT @ft_id's miss action to its default: for FDB tables this
+ * reconnects to the default FDB miss table, otherwise the FW default
+ * miss action is restored. Silently skipped (returns 0) when the FW
+ * cannot do this without disconnecting the RTC.
+ */
+int mlx5hws_table_ft_set_default_next_ft(struct mlx5hws_table *tbl, u32 ft_id)
+{
+       struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+       int ret;
+
+       /* Due to FW limitation, resetting the flow table to default action will
+        * disconnect RTC when ignore_flow_level_rtc_valid is not supported.
+        */
+       if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid)
+               return 0;
+
+       if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+               return hws_table_connect_to_default_miss_tbl(tbl, ft_id);
+
+       ft_attr.type = tbl->fw_ft_type;
+       ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
+       ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT;
+
+       ret = mlx5hws_cmd_flow_table_modify(tbl->ctx->mdev, &ft_attr, ft_id);
+       if (ret) {
+               mlx5hws_err(tbl->ctx, "Failed to set FT default miss action\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+/* Point FT @ft_id at the given RTC pair (rtc_0/rtc_1); passing 0,0
+ * detaches the RTCs (see callers in mlx5hws_table_connect_to_miss_table).
+ */
+int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
+                                 u32 ft_id,
+                                 u32 fw_ft_type,
+                                 u32 rtc_0_id,
+                                 u32 rtc_1_id)
+{
+       struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+
+       ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID;
+       ft_attr.type = fw_ft_type;
+       ft_attr.rtc_id_0 = rtc_0_id;
+       ft_attr.rtc_id_1 = rtc_1_id;
+
+       return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id);
+}
+
+/* Set FT @ft_id's miss action to "go to table @next_ft_id" */
+static int hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
+                                   u32 ft_id,
+                                   u32 fw_ft_type,
+                                   u32 next_ft_id)
+{
+       struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+
+       ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
+       ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
+       ft_attr.type = fw_ft_type;
+       ft_attr.table_miss_id = next_ft_id;
+
+       return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id);
+}
+
+/* Re-connect every table whose default miss points at @dst_tbl. Called
+ * when dst_tbl's chain changed so sources aim at its new entry point.
+ * Returns on the first failure.
+ */
+int mlx5hws_table_update_connected_miss_tables(struct mlx5hws_table *dst_tbl)
+{
+       struct mlx5hws_table *src_tbl;
+       int ret;
+
+       if (list_empty(&dst_tbl->default_miss.head))
+               return 0;
+
+       list_for_each_entry(src_tbl, &dst_tbl->default_miss.head, default_miss.next) {
+               ret = mlx5hws_table_connect_to_miss_table(src_tbl, dst_tbl);
+               if (ret) {
+                       mlx5hws_err(dst_tbl->ctx,
+                                   "Failed to update source miss table, unexpected behavior\n");
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+/* Connect src_tbl's last FT so misses continue into dst_tbl, or reset it
+ * to defaults when dst_tbl is NULL. Three cases:
+ *  - dst_tbl empty: miss goes to dst_tbl's anchor FT, RTCs detached;
+ *  - dst_tbl has matchers: last FT is wired to the first matcher's RTC
+ *    pair and its miss FT is reset to default;
+ *  - dst_tbl NULL: miss FT reset to default and RTCs detached.
+ * Records dst_tbl as src_tbl's current miss table on success.
+ */
+int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl,
+                                       struct mlx5hws_table *dst_tbl)
+{
+       struct mlx5hws_matcher *matcher;
+       u32 last_ft_id;
+       int ret;
+
+       last_ft_id = hws_table_get_last_ft(src_tbl);
+
+       if (dst_tbl) {
+               if (list_empty(&dst_tbl->matchers_list)) {
+                       /* Connect src_tbl last_ft to dst_tbl start anchor */
+                       ret = hws_table_ft_set_next_ft(src_tbl->ctx,
+                                                      last_ft_id,
+                                                      src_tbl->fw_ft_type,
+                                                      dst_tbl->ft_id);
+                       if (ret)
+                               return ret;
+
+                       /* Reset last_ft RTC to default RTC */
+                       ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+                                                           last_ft_id,
+                                                           src_tbl->fw_ft_type,
+                                                           0, 0);
+                       if (ret)
+                               return ret;
+               } else {
+                       /* Connect src_tbl last_ft to first matcher RTC */
+                       matcher = list_first_entry(&dst_tbl->matchers_list,
+                                                  struct mlx5hws_matcher,
+                                                  list_node);
+                       ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+                                                           last_ft_id,
+                                                           src_tbl->fw_ft_type,
+                                                           matcher->match_ste.rtc_0_id,
+                                                           matcher->match_ste.rtc_1_id);
+                       if (ret)
+                               return ret;
+
+                       /* Reset next miss FT to default */
+                       ret = mlx5hws_table_ft_set_default_next_ft(src_tbl, last_ft_id);
+                       if (ret)
+                               return ret;
+               }
+       } else {
+               /* Reset next miss FT to default */
+               ret = mlx5hws_table_ft_set_default_next_ft(src_tbl, last_ft_id);
+               if (ret)
+                       return ret;
+
+               /* Reset last_ft RTC to default RTC */
+               ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+                                                   last_ft_id,
+                                                   src_tbl->fw_ft_type,
+                                                   0, 0);
+               if (ret)
+                       return ret;
+       }
+
+       src_tbl->default_miss.miss_tbl = dst_tbl;
+
+       return 0;
+}
+
+/* Validate a set-default-miss request: requires FW support and, when a
+ * miss table is given, matching table types.
+ */
+static int hws_table_set_default_miss_not_valid(struct mlx5hws_table *tbl,
+                                               struct mlx5hws_table *miss_tbl)
+{
+       if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid) {
+               mlx5hws_err(tbl->ctx, "Default miss table is not supported\n");
+               return -EOPNOTSUPP;
+       }
+
+       if ((miss_tbl && miss_tbl->type != tbl->type)) {
+               mlx5hws_err(tbl->ctx, "Invalid arguments\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Set (or clear, with miss_tbl == NULL) tbl's default miss table and
+ * maintain the miss-linkage lists under ctx->ctrl_lock.
+ */
+int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
+                                  struct mlx5hws_table *miss_tbl)
+{
+       struct mlx5hws_context *ctx = tbl->ctx;
+       struct mlx5hws_table *old_miss_tbl;
+       int ret;
+
+       ret = hws_table_set_default_miss_not_valid(tbl, miss_tbl);
+       if (ret)
+               return ret;
+
+       mutex_lock(&ctx->ctrl_lock);
+
+       old_miss_tbl = tbl->default_miss.miss_tbl;
+       ret = mlx5hws_table_connect_to_miss_table(tbl, miss_tbl);
+       if (ret)
+               goto out;
+
+       if (old_miss_tbl)
+               list_del_init(&tbl->default_miss.next);
+
+       /* NOTE(review): miss_tbl was already stored by connect above, so
+        * this re-read yields the *new* miss table, and the list_del_init
+        * below reinitializes its ->head (the list of tables missing to
+        * it) -- verify this is intended rather than the old miss table.
+        */
+       old_miss_tbl = tbl->default_miss.miss_tbl;
+       if (old_miss_tbl)
+               list_del_init(&old_miss_tbl->default_miss.head);
+
+       if (miss_tbl)
+               list_add(&tbl->default_miss.next, &miss_tbl->default_miss.head);
+
+       mutex_unlock(&ctx->ctrl_lock);
+       return 0;
+out:
+       mutex_unlock(&ctx->ctrl_lock);
+       return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
new file mode 100644 (file)
index 0000000..dd50420
--- /dev/null
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_TABLE_H_
+#define MLX5HWS_TABLE_H_
+
+struct mlx5hws_default_miss {
+       /* My miss table */
+       struct mlx5hws_table *miss_tbl;
+       struct list_head next;
+       /* Tables missing to my table */
+       struct list_head head;
+};
+
+/* A hardware steering table: an anchor FT plus a chain of matchers */
+struct mlx5hws_table {
+       struct mlx5hws_context *ctx;
+       u32 ft_id;              /* anchor (default) flow table ID */
+       enum mlx5hws_table_type type;
+       u32 fw_ft_type;
+       u32 level;
+       struct list_head matchers_list;
+       struct list_head tbl_list_node; /* membership in ctx->tbl_list */
+       struct mlx5hws_default_miss default_miss;
+};
+
+/* Map a HWS table type to the FW FT type in *ret_type. Returns 0 or
+ * -EOPNOTSUPP for non-FDB types. NOTE(review): the return type is u32,
+ * so the error comes back as a large positive value -- callers only
+ * test for non-zero, but confirm before comparing against -EOPNOTSUPP.
+ */
+static inline
+u32 mlx5hws_table_get_fw_ft_type(enum mlx5hws_table_type type,
+                                u8 *ret_type)
+{
+       if (type != MLX5HWS_TABLE_TYPE_FDB)
+               return -EOPNOTSUPP;
+
+       *ret_type = FS_FT_FDB;
+
+       return 0;
+}
+
+/* Map a table type to the per-direction FW FT type (RX, or TX when
+ * is_mirror is set); 0 for unsupported types.
+ */
+static inline
+u32 mlx5hws_table_get_res_fw_ft_type(enum mlx5hws_table_type tbl_type,
+                                    bool is_mirror)
+{
+       if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
+               return is_mirror ? FS_FT_FDB_TX : FS_FT_FDB_RX;
+
+       return 0;
+}
+
+/* Anchor FT lifetime (see table.c for full semantics) */
+int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
+                                   struct mlx5hws_table *tbl,
+                                   u32 *ft_id);
+
+void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl,
+                                     u32 ft_id);
+
+/* Miss-chain wiring between tables */
+int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl,
+                                       struct mlx5hws_table *dst_tbl);
+
+int mlx5hws_table_update_connected_miss_tables(struct mlx5hws_table *dst_tbl);
+
+int mlx5hws_table_ft_set_default_next_ft(struct mlx5hws_table *tbl, u32 ft_id);
+
+int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
+                                 u32 ft_id,
+                                 u32 fw_ft_type,
+                                 u32 rtc_0_id,
+                                 u32 rtc_1_id);
+
+#endif /* MLX5HWS_TABLE_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.c
new file mode 100644 (file)
index 0000000..d8e382b
--- /dev/null
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+/* Initialize vport GVMI tracking: create the xarray cache and query the
+ * eswitch manager's GVMI. No-op (returns 0) when this function is not
+ * the eswitch manager.
+ */
+int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx)
+{
+       int ret;
+
+       if (!ctx->caps->eswitch_manager)
+               return 0;
+
+       xa_init(&ctx->vports.vport_gvmi_xa);
+
+       /* Set gvmi for eswitch manager and uplink vports only. Rest of the vports
+        * (vport 0 of other function, VFs and SFs) will be queried dynamically.
+        */
+
+       ret = mlx5hws_cmd_query_gvmi(ctx->mdev, false, 0, &ctx->vports.esw_manager_gvmi);
+       if (ret)
+               return ret;
+
+       ctx->vports.uplink_gvmi = 0;
+       return 0;
+}
+
+/* Tear down the vport GVMI cache created by mlx5hws_vport_init_vports() */
+void mlx5hws_vport_uninit_vports(struct mlx5hws_context *ctx)
+{
+       if (ctx->caps->eswitch_manager)
+               xa_destroy(&ctx->vports.vport_gvmi_xa);
+}
+
+/* Query a vport's GVMI from FW and cache it in the xarray. May return
+ * -EBUSY when another caller raced us and inserted the entry first;
+ * the caller retries the lookup in that case.
+ */
+static int hws_vport_add_gvmi(struct mlx5hws_context *ctx, u16 vport)
+{
+       u16 vport_gvmi;
+       int ret;
+
+       ret = mlx5hws_cmd_query_gvmi(ctx->mdev, true, vport, &vport_gvmi);
+       if (ret)
+               return -EINVAL;
+
+       ret = xa_insert(&ctx->vports.vport_gvmi_xa, vport,
+                       xa_mk_value(vport_gvmi), GFP_KERNEL);
+       if (ret)
+               mlx5hws_dbg(ctx, "Couldn't insert new vport gvmi into xarray (%d)\n", ret);
+
+       return ret;
+}
+
+/* True when @vport is the eswitch manager vport (ECPF on ECPF devices,
+ * otherwise the PF vport).
+ */
+static bool hws_vport_is_esw_mgr_vport(struct mlx5hws_context *ctx, u16 vport)
+{
+       return ctx->caps->is_ecpf ? vport == MLX5_VPORT_ECPF :
+                                   vport == MLX5_VPORT_PF;
+}
+
+/* Resolve a vport number to its GVMI. The eswitch manager and uplink
+ * values are pre-resolved at init; anything else is served from the
+ * xarray cache, populated on demand. A racing insert (-EBUSY) simply
+ * retries the lookup; only usable when this function is eswitch manager.
+ */
+int mlx5hws_vport_get_gvmi(struct mlx5hws_context *ctx, u16 vport, u16 *vport_gvmi)
+{
+       void *entry;
+       int ret;
+
+       if (!ctx->caps->eswitch_manager)
+               return -EINVAL;
+
+       if (hws_vport_is_esw_mgr_vport(ctx, vport)) {
+               *vport_gvmi = ctx->vports.esw_manager_gvmi;
+               return 0;
+       }
+
+       if (vport == MLX5_VPORT_UPLINK) {
+               *vport_gvmi = ctx->vports.uplink_gvmi;
+               return 0;
+       }
+
+load_entry:
+       entry = xa_load(&ctx->vports.vport_gvmi_xa, vport);
+
+       if (!xa_is_value(entry)) {
+               ret = hws_vport_add_gvmi(ctx, vport);
+               if (ret && ret != -EBUSY)
+                       return ret;
+               goto load_entry;
+       }
+
+       *vport_gvmi = (u16)xa_to_value(entry);
+       return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.h
new file mode 100644 (file)
index 0000000..0912fc1
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_VPORT_H_
+#define MLX5HWS_VPORT_H_
+
+/* Set up / tear down the per-context vport GVMI cache */
+int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx);
+
+void mlx5hws_vport_uninit_vports(struct mlx5hws_context *ctx);
+
+/* Resolve a vport number to its GVMI (cached, queried on demand) */
+int mlx5hws_vport_get_gvmi(struct mlx5hws_context *ctx, u16 vport, u16 *vport_gvmi);
+
+#endif /* MLX5HWS_VPORT_H_ */