net/mlx5e: Use the aligned max TX MPWQE size
author Maxim Mikityanskiy <maximmi@nvidia.com>
Tue, 27 Sep 2022 20:36:04 +0000 (13:36 -0700)
committer Jakub Kicinski <kuba@kernel.org>
Thu, 29 Sep 2022 02:36:36 +0000 (19:36 -0700)
TX MPWQE size is limited to the cacheline-aligned maximum. Use the same
value for the stop room and the capability check.

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c

index 2be09cc3c4378883fb0ce2d3574d9c02bcace28f..2c8fe2e60e17b9634a344799968449148356d93b 100644 (file)
@@ -209,11 +209,11 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
        stop_room  = mlx5e_ktls_get_stop_room(mdev, params);
        stop_room += mlx5e_stop_room_for_max_wqe(mdev);
        if (is_mpwqe)
-               /* A MPWQE can take up to the maximum-sized WQE + all the normal
-                * stop room can be taken if a new packet breaks the active
-                * MPWQE session and allocates its WQEs right away.
+               /* A MPWQE can take up to the maximum cacheline-aligned WQE +
+                * all the normal stop room can be taken if a new packet breaks
+                * the active MPWQE session and allocates its WQEs right away.
                 */
-               stop_room += mlx5e_stop_room_for_max_wqe(mdev);
+               stop_room += mlx5e_stop_room_for_mpwqe(mdev);
 
        return stop_room;
 }
index c208ea307bffbae3d226b66c92c192736b219490..8751e48e283d7dd03443976a87dd01fc90ab9ddb 100644 (file)
@@ -439,6 +439,13 @@ static inline u16 mlx5e_stop_room_for_max_wqe(struct mlx5_core_dev *mdev)
        return MLX5E_STOP_ROOM(mlx5e_get_max_sq_wqebbs(mdev));
 }
 
+static inline u16 mlx5e_stop_room_for_mpwqe(struct mlx5_core_dev *mdev)
+{
+       u8 mpwqe_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
+
+       return mlx5e_stop_room_for_wqe(mdev, mpwqe_wqebbs);
+}
+
 static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
 {
        u16 room = sq->reserved_room;
index 688a6589b3b86ff0d455d24ef15622c75d2c8d7e..9b48ae61f6922c85370b197425021f364ad2a53c 100644 (file)
@@ -75,7 +75,7 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
 
        striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
                          MLX5_CAP_ETH(mdev, reg_umr_sq);
-       max_wqe_sz_cap = mlx5e_get_max_sq_wqebbs(mdev) * MLX5_SEND_WQE_BB;
+       max_wqe_sz_cap = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
        inline_umr = max_wqe_sz_cap >= MLX5E_UMR_WQE_INLINE_SZ;
        if (!striding_rq_umr)
                return false;
@@ -1155,7 +1155,8 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
                is_redirect ?
                        &c->priv->channel_stats[c->ix]->xdpsq :
                        &c->priv->channel_stats[c->ix]->rq_xdpsq;
-       sq->stop_room = mlx5e_stop_room_for_max_wqe(mdev);
+       sq->stop_room = param->is_mpw ? mlx5e_stop_room_for_mpwqe(mdev) :
+                                       mlx5e_stop_room_for_max_wqe(mdev);
        sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
 
        param->wq.db_numa_node = cpu_to_node(c->cpu);