net/mlx5e: Add recovery flow in case of error CQE
author	Gal Pressman <gal@nvidia.com>
	Wed, 22 Dec 2021 12:03:39 +0000 (14:03 +0200)
committer	Saeed Mahameed <saeedm@nvidia.com>
	Fri, 7 Jan 2022 00:22:55 +0000 (16:22 -0800)
The rep legacy RQ completion handling was missing the appropriate
handling of error CQEs (dump the CQE and queue a recover work); fix it
by calling trigger_report() when needed.
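
For readers unfamiliar with the recovery path, the two steps named above
(dump the CQE, queue a recover work) follow a common kernel pattern. The
sketch below only illustrates that pattern under assumed placeholder names
(err_rq, dump_err_cqe(), ERR_RQ_RECOVERING, recover_work are not the mlx5e
definitions); the driver's actual implementation is trigger_report() in
en_rx.c.

  /*
   * Minimal sketch of the "dump the CQE and queue a recover work"
   * pattern.  All names here are illustrative placeholders, not the
   * mlx5e definitions.
   */
  #include <linux/bitops.h>
  #include <linux/workqueue.h>

  struct err_rq {
  	unsigned long state;              /* bit 0: recovery in progress */
  	struct work_struct recover_work;  /* worker resets/re-posts the RQ */
  };

  #define ERR_RQ_RECOVERING 0

  static void dump_err_cqe(void *cqe)
  {
  	/* placeholder: the real driver logs the raw error CQE for diagnostics */
  }

  static void handle_err_cqe(struct err_rq *rq, void *cqe)
  {
  	/* start at most one recovery; later error CQEs only bump counters */
  	if (test_and_set_bit(ERR_RQ_RECOVERING, &rq->state))
  		return;

  	dump_err_cqe(cqe);                        /* step 1: dump the CQE */
  	queue_work(system_wq, &rq->recover_work); /* step 2: queue recovery work */
  }

In the driver itself the error counter increment (wqe_err) stays with the
caller, which is exactly what the new mlx5e_handle_rx_err_cqe() helper in
the diff below bundles together with trigger_report().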

Since all CQE handling flows do the exact same error CQE handling,
extract it to a common helper function.

Signed-off-by: Gal Pressman <gal@nvidia.com>
Reviewed-by: Aya Levin <ayal@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index f09b57c31ed7a5a14969b6941810bc192dca93bb..96e260fd798785dcfdaa1b683f3677e47150b627 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1603,6 +1603,12 @@ static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        }
 }
 
+static void mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+       trigger_report(rq, cqe);
+       rq->stats->wqe_err++;
+}
+
 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 {
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
@@ -1616,8 +1622,7 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
 
        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
-               trigger_report(rq, cqe);
-               rq->stats->wqe_err++;
+               mlx5e_handle_rx_err_cqe(rq, cqe);
                goto free_wqe;
        }
 
@@ -1670,7 +1675,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
 
        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
-               rq->stats->wqe_err++;
+               mlx5e_handle_rx_err_cqe(rq, cqe);
                goto free_wqe;
        }
 
@@ -1719,8 +1724,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
        wi->consumed_strides += cstrides;
 
        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
-               trigger_report(rq, cqe);
-               rq->stats->wqe_err++;
+               mlx5e_handle_rx_err_cqe(rq, cqe);
                goto mpwrq_cqe_out;
        }
 
@@ -1988,8 +1992,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
        wi->consumed_strides += cstrides;
 
        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
-               trigger_report(rq, cqe);
-               stats->wqe_err++;
+               mlx5e_handle_rx_err_cqe(rq, cqe);
                goto mpwrq_cqe_out;
        }
 
@@ -2058,8 +2061,7 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
        wi->consumed_strides += cstrides;
 
        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
-               trigger_report(rq, cqe);
-               rq->stats->wqe_err++;
+               mlx5e_handle_rx_err_cqe(rq, cqe);
                goto mpwrq_cqe_out;
        }