net/mlx5e: SHAMPO, Fix page_index calculation inconsistency
author Dragos Tatulea <dtatulea@nvidia.com>
Thu, 7 Nov 2024 19:43:54 +0000 (21:43 +0200)
committer Jakub Kicinski <kuba@kernel.org>
Tue, 12 Nov 2024 03:28:18 +0000 (19:28 -0800)
When calculating the index of the next frag page slot, the divisor
(applied as a power-of-two wrap-around mask) is incorrect: it should be
the number of pages per queue, not the number of headers per queue. This
is currently harmless because the frag pages are not used directly but
are reached through the info array. It still needs fixing, though, as an
upcoming patch will get rid of the info array.
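
To make the inconsistency concrete, here is a minimal sketch of the
arithmetic, using hypothetical values rather than the driver's real
configuration:

	/* Hypothetical sizing: 1024 headers per WQ, 16 headers per
	 * page (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE).
	 */
	u32 hd_per_wq = 1024;
	u32 hd_per_page = 16;
	u32 pages_per_wq = hd_per_wq / hd_per_page;	/* 64 page slots */

	/* Buggy wrap: masks with the header count, so page_index keeps
	 * incrementing well past the last valid page slot (63) before
	 * it wraps.
	 */
	page_index = (page_index + 1) & (hd_per_wq - 1);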

This patch introduces a new pages-per-queue variable and plugs it into
the formula.
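
With the new variable, the wrap bound matches the size of the pages
array. Continuing the sketch above:

	/* Fixed wrap: masks with the number of page slots. */
	page_index = (page_index + 1) & (pages_per_wq - 1);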

Now that this variable exists, the SHAMPO initialization code can be
simplified as well.
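
The simplification is in mlx5_rq_shampo_alloc(): hd_per_wq is already
derived from hd_per_wqe and the WQ size (see mlx5e_shampo_hd_per_wq()),
so the pool-size contribution no longer needs to recompute wq_size
locally. Assuming hd_per_wq == hd_per_wqe * wq_size (ignoring any
rounding the driver may apply), the two forms are equivalent:

	/* before */
	*pool_size += (hd_per_wqe * wq_size) / MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
	/* after, == hd_per_wq / MLX5E_SHAMPO_WQ_HEADER_PER_PAGE */
	*pool_size += shampo->pages_per_wq;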

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20241107194357.683732-10-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

index 4449a57ba5b2da2f6b29289f645156745993e622..b4abb094f01a402a840daa1ca607a4fd028925ce 100644
@@ -629,6 +629,7 @@ struct mlx5e_shampo_hd {
        u16 curr_page_index;
        u32 hd_per_wq;
        u16 hd_per_wqe;
+       u16 pages_per_wq;
        unsigned long *bitmap;
        u16 pi;
        u16 ci;
index 59d7a0e28f24b7941ebfa0bc4544f71e7d4bb923..3ca1ef1f39a532d17d33068b3a1eb6db6a9c6758 100644
@@ -767,8 +767,6 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
                                u32 *pool_size,
                                int node)
 {
-       void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
-       int wq_size;
        int err;
 
        if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
@@ -793,9 +791,9 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
                cpu_to_be32(rq->mpwqe.shampo->mkey);
        rq->mpwqe.shampo->hd_per_wqe =
                mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
-       wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
-       *pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
-                    MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
+       rq->mpwqe.shampo->pages_per_wq =
+               rq->mpwqe.shampo->hd_per_wq / MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
+       *pool_size += rq->mpwqe.shampo->pages_per_wq;
        return 0;
 
 err_hw_gro_data:
index e044e5d11f056698b25015cbdcd2d232ac2d9a45..76a975667c77bba415572dc0293b90d152310e81 100644
@@ -671,7 +671,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
                header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
                        MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
                if (!(header_offset & (PAGE_SIZE - 1))) {
-                       page_index = (page_index + 1) & (shampo->hd_per_wq - 1);
+                       page_index = (page_index + 1) & (shampo->pages_per_wq - 1);
                        frag_page = &shampo->pages[page_index];
 
                        err = mlx5e_page_alloc_fragmented(rq, frag_page);
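
A note on the mask: wrapping with "& (pages_per_wq - 1)" assumes
pages_per_wq is a power of two, which holds as long as hd_per_wq is a
power-of-two multiple of MLX5E_SHAMPO_WQ_HEADER_PER_PAGE. A hypothetical
sanity check at allocation time could be:

	WARN_ON_ONCE(!is_power_of_2(rq->mpwqe.shampo->pages_per_wq));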