From: Benjamin Poirier
Date: Wed, 16 Oct 2024 17:36:15 +0000 (+0300)
Subject: net/mlx5: Only create VEPA flow table when in VEPA mode
X-Git-Tag: v6.13-rc1~135^2~259^2~2
X-Git-Url: https://git.kernel.dk/?a=commitdiff_plain;h=f0ac6209460e8cc9bddf161d7210da5b72aa4846;p=linux-block.git

net/mlx5: Only create VEPA flow table when in VEPA mode

Currently, when VFs are created, two flow tables are added for the
eswitch: the "fdb" table, which contains rules for each VF, and the
"vepa_fdb" table. In the default VEB mode, the vepa_fdb table is empty.
When switching to VEPA mode, flow steering rules are added to vepa_fdb.
Even though the vepa_fdb table is empty in VEB mode, its presence adds
some cost to packet processing. In some workloads, this leads to drops,
which are reported by the rx_discards_phy ethtool counter.

In order to improve performance, only create vepa_fdb when in VEPA mode.

Tests were done on a ConnectX-6 Lx adapter forwarding 64B packets
between both ports using dpdk-testpmd. Numbers are Rx-pps for each port,
as reported by testpmd.

Without changes:
    traffic to unknown mac
        testpmd on PF
            numvfs=0,0: 35257998,35264499
            numvfs=1,1: 24590124,24590888
        testpmd on VF with numvfs=1,1: 20434338,20434887
    traffic to VF mac
        testpmd on VF with numvfs=1,1: 30341014,30340749

With changes:
    traffic to unknown mac
        testpmd on PF
            numvfs=0,0: 35404361,35383378
            numvfs=1,1: 29801247,29790757
        testpmd on VF with numvfs=1,1: 24310435,24309084
    traffic to VF mac
        testpmd on VF with numvfs=1,1: 34811436,34781706

Signed-off-by: Benjamin Poirier
Reviewed-by: Cosmin Ratiu
Reviewed-by: Saeed Mahameed
Signed-off-by: Tariq Toukan
Signed-off-by: Paolo Abeni
---

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index 288c797e4a78..45183de424f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -176,20 +176,10 @@ static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
 
 static int esw_create_legacy_table(struct mlx5_eswitch *esw)
 {
-	int err;
-
 	memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
 	atomic64_set(&esw->user_count, 0);
 
-	err = esw_create_legacy_vepa_table(esw);
-	if (err)
-		return err;
-
-	err = esw_create_legacy_fdb_table(esw);
-	if (err)
-		esw_destroy_legacy_vepa_table(esw);
-
-	return err;
+	return esw_create_legacy_fdb_table(esw);
 }
 
 static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
@@ -259,15 +249,22 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
 
 	if (!setting) {
 		esw_cleanup_vepa_rules(esw);
+		esw_destroy_legacy_vepa_table(esw);
 		return 0;
 	}
 
 	if (esw->fdb_table.legacy.vepa_uplink_rule)
 		return 0;
 
+	err = esw_create_legacy_vepa_table(esw);
+	if (err)
+		return err;
+
 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
-	if (!spec)
-		return -ENOMEM;
+	if (!spec) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	/* Uplink rule forward uplink traffic to FDB */
 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
@@ -303,8 +300,10 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
 
 out:
 	kvfree(spec);
-	if (err)
+	if (err) {
 		esw_cleanup_vepa_rules(esw);
+		esw_destroy_legacy_vepa_table(esw);
+	}
 
 	return err;
 }
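
For context: VEPA mode on the PF is normally toggled from userspace
through the bridge netlink interface, e.g. "bridge link set dev <pf>
hwmode vepa" / "hwmode veb", which the driver handles via its
ndo_bridge_setlink callback and mlx5_eswitch_set_vepa(). With this
patch, that same toggle is also what creates and destroys the vepa_fdb
table. The sketch below only illustrates the resulting lifecycle;
toggle_vepa_sketch() is a hypothetical helper, not driver code, and
error handling around the caller is omitted.

static int toggle_vepa_sketch(struct mlx5_eswitch *esw, bool enable)
{
	/* enable == true:  _mlx5_eswitch_set_vepa_locked() now calls
	 *                  esw_create_legacy_vepa_table() on demand before
	 *                  installing the VEPA steering rules.
	 * enable == false: esw_cleanup_vepa_rules() removes the rules and
	 *                  esw_destroy_legacy_vepa_table() frees the table,
	 *                  so VEB mode runs without the extra empty table
	 *                  in the packet processing path.
	 */
	return mlx5_eswitch_set_vepa(esw, enable ? 1 : 0);
}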