mlxsw: spectrum_router: Add trap adjacency entry upon first nexthop group
authorIdo Schimmel <idosch@nvidia.com>
Wed, 22 Sep 2021 07:36:41 +0000 (10:36 +0300)
committerDavid S. Miller <davem@davemloft.net>
Wed, 22 Sep 2021 13:35:01 +0000 (14:35 +0100)
In commit 0c3cbbf96def ("mlxsw: Add specific trap for packets routed via
invalid nexthops"), mlxsw started allocating a new adjacency entry
during driver initialization, to trap packets routed via invalid
nexthops.

This behavior was later altered in commit 983db6198f0d ("mlxsw:
spectrum_router: Allocate discard adjacency entry when needed") to only
allocate the entry upon the first route that requires it. The motivation
for the change is explained in the commit message.

The problem with the current behavior is that the entry shows up as a
"leak" in a new BPF resource monitoring tool [1]. This is caused by the
asymmetry of the allocation/free scheme. While the entry is allocated
upon the first route that requires it, it is only freed during
de-initialization of the driver.

Instead, track the number of active nexthop groups and allocate the
adjacency entry upon the creation of the first group. Free it when the
number of active groups reaches zero.

The next patch will convert mlxsw to start using the new entry and
remove the old one.

[1] https://github.com/Mellanox/mlxsw/tree/master/Debugging/libbpf-tools/resmon

Signed-off-by: Ido Schimmel <idosch@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h

index 19bb3ca0515e212f6da2da8baf2b536084103cd9..00648e093351e92fefe4da7270775bc8038ec783 100644 (file)
@@ -4376,6 +4376,66 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
        }
 }
 
+/* Allocate a single adjacency (KVDL) entry and program it to trap packets
+ * to the CPU with trap identifier RTR_EGRESS0. Per the commit message, the
+ * entry is created when the first nexthop group appears and is meant for
+ * packets routed via invalid nexthops.
+ */
+static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
+{
+       enum mlxsw_reg_ratr_trap_action trap_action;
+       char ratr_pl[MLXSW_REG_RATR_LEN];
+       int err;
+
+       /* Reserve one adjacency entry; its index is stored in the router
+        * struct so it can be referenced by routes and freed later.
+        */
+       err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+                                 &mlxsw_sp->router->adj_trap_index);
+       if (err)
+               return err;
+
+       /* Program the entry as an Ethernet adjacency bound to the loopback
+        * RIF with a trap action, so matching packets are punted to the CPU.
+        */
+       trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
+       mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
+                           MLXSW_REG_RATR_TYPE_ETHERNET,
+                           mlxsw_sp->router->adj_trap_index,
+                           mlxsw_sp->router->lb_rif_index);
+       mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
+       mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+       if (err)
+               goto err_ratr_write;
+
+       return 0;
+
+err_ratr_write:
+       /* Roll back the KVDL allocation if the RATR register write failed. */
+       mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+                          mlxsw_sp->router->adj_trap_index);
+       return err;
+}
+
+/* Release the trap adjacency entry allocated by
+ * mlxsw_sp_adj_trap_entry_init(). The RATR entry itself is not explicitly
+ * invalidated here; only the KVDL index is returned to the allocator.
+ */
+static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
+{
+       mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+                          mlxsw_sp->router->adj_trap_index);
+}
+
+/* Account for a new nexthop group. The trap adjacency entry is lazily
+ * created when the count transitions from zero to one, so that it only
+ * exists while at least one group is active (avoids the asymmetric
+ * alloc/free the commit message describes).
+ *
+ * NOTE(review): the inc_not_zero / set(1) pair is not atomic as a whole;
+ * this presumably relies on callers being serialized by the router lock —
+ * TODO confirm against the call sites.
+ */
+static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
+{
+       int err;
+
+       /* Fast path: a group already exists, just take another reference. */
+       if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
+               return 0;
+
+       /* First group: create the trap adjacency entry before publishing
+        * a non-zero count.
+        */
+       err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
+       if (err)
+               return err;
+
+       refcount_set(&mlxsw_sp->router->num_groups, 1);
+
+       return 0;
+}
+
+/* Drop a nexthop group reference; free the trap adjacency entry when the
+ * last active group goes away, mirroring mlxsw_sp_nexthop_group_inc().
+ */
+static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
+{
+       if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
+               return;
+
+       mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
+}
+
 static void
 mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
                             const struct mlxsw_sp_nexthop_group *nh_grp,
@@ -4790,6 +4850,9 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
                if (err)
                        goto err_nexthop_obj_init;
        }
+       err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
+       if (err)
+               goto err_group_inc;
        err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
        if (err) {
                NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
@@ -4808,6 +4871,8 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
        return 0;
 
 err_group_refresh:
+       mlxsw_sp_nexthop_group_dec(mlxsw_sp);
+err_group_inc:
        i = nhgi->count;
 err_nexthop_obj_init:
        for (i--; i >= 0; i--) {
@@ -4832,6 +4897,7 @@ mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
                        cancel_delayed_work(&router->nh_grp_activity_dw);
        }
 
+       mlxsw_sp_nexthop_group_dec(mlxsw_sp);
        for (i = nhgi->count - 1; i >= 0; i--) {
                struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
 
@@ -5223,6 +5289,9 @@ mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
                if (err)
                        goto err_nexthop4_init;
        }
+       err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
+       if (err)
+               goto err_group_inc;
        err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
        if (err)
                goto err_group_refresh;
@@ -5230,6 +5299,8 @@ mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
        return 0;
 
 err_group_refresh:
+       mlxsw_sp_nexthop_group_dec(mlxsw_sp);
+err_group_inc:
        i = nhgi->count;
 err_nexthop4_init:
        for (i--; i >= 0; i--) {
@@ -5247,6 +5318,7 @@ mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
        struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
        int i;
 
+       mlxsw_sp_nexthop_group_dec(mlxsw_sp);
        for (i = nhgi->count - 1; i >= 0; i--) {
                struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
 
@@ -6641,6 +6713,9 @@ mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
                mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
        }
        nh_grp->nhgi = nhgi;
+       err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
+       if (err)
+               goto err_group_inc;
        err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
        if (err)
                goto err_group_refresh;
@@ -6648,6 +6723,8 @@ mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
        return 0;
 
 err_group_refresh:
+       mlxsw_sp_nexthop_group_dec(mlxsw_sp);
+err_group_inc:
        i = nhgi->count;
 err_nexthop6_init:
        for (i--; i >= 0; i--) {
@@ -6665,6 +6742,7 @@ mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
        struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
        int i;
 
+       mlxsw_sp_nexthop_group_dec(mlxsw_sp);
        for (i = nhgi->count - 1; i >= 0; i--) {
                struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
 
index 25d3eae6350133ec35433f37ad3cfb14a76dc311..0c4f5bf4efd9bf589a27fb9b9a47ec89a9d58e36 100644 (file)
@@ -82,6 +82,8 @@ struct mlxsw_sp_router {
        struct delayed_work nh_grp_activity_dw;
        struct list_head nh_res_grp_list;
        bool inc_parsing_depth;
+       refcount_t num_groups;
+       u32 adj_trap_index;
 };
 
 struct mlxsw_sp_fib_entry_priv {