net/mlx5e: RX, Defer page release in striding rq for better recycling
author Dragos Tatulea <dtatulea@nvidia.com>
Tue, 14 Feb 2023 10:01:40 +0000 (12:01 +0200)
committer Saeed Mahameed <saeedm@nvidia.com>
Tue, 28 Mar 2023 20:43:58 +0000 (13:43 -0700)
Currently, for striding RQ, fragmented pages from the page pool can
get released in two ways:

1) In the mlx5e driver, when trimming off the unused fragments,
   provided that the associated skb fragments have already been
   released. This path allows pages to be recycled to the page pool
   cache (allow_direct == true).

2) On the skb release path (last fragment release), which
   will always release pages to the page pool ring
   (allow_direct == false).

Whichever path releases the last fragment decides where the page
ends up: the cache or the ring. So we want to maximize the number
of releases done via path 1.
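
For reference, a minimal sketch of what allow_direct means at the
page pool level (illustration only: the function and parameter names
below are made up, and mlx5e releases fragments through its own
helpers rather than calling page_pool_put_full_page() directly):

  #include <net/page_pool.h>

  /* The allow_direct argument on the final put decides where the
   * page is recycled to.
   */
  static void example_final_put(struct page_pool *pool,
                                struct page *page, bool in_napi_context)
  {
          /* true  -> lockless per-CPU cache (path 1, cheap)
           * false -> ptr_ring (path 2, e.g. the skb release path)
           */
          page_pool_put_full_page(pool, page, in_napi_context);
  }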

This patch does that by deferring the release of page fragments
until right before new ones are requested from the page pool (see
the sketch after the list below). Extra care needs to be taken for
the corner cases:

* On the first pass, make sure that no release is attempted. The
  skip_release_bitmap is used for this purpose.

* On RQ shutdown, make sure that WQEs which were popped from the
  linked list but not yet refilled are released as well.
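
The resulting flow, condensed from the hunks below (a sketch, not a
drop-in replacement; all names are taken from the diff):

  /* 1) At RQ creation (mlx5e_rq_alloc_mpwqe_info()): mark every WQE
   *    so that the very first deferred release is skipped.
   */
  for (int i = 0; i < wq_sz; i++) {
          struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, i);

          bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
  }

  /* 2) When refilling (mlx5e_post_rx_mpwqes()): release the old
   *    fragments of a WQE right before repopulating it. This runs in
   *    NAPI context, so pages can be recycled straight to the page
   *    pool cache (allow_direct == true).
   */
  struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, head);

  mlx5e_free_rx_mpwqe(rq, wi, true);
  alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :
                             mlx5e_alloc_rx_mpwqe(rq, head);

  /* 3) On shutdown (mlx5e_free_rx_missing_descs()): WQEs that were
   *    popped from the linked list but not yet refilled still hold
   *    pages; walk mlx5_wq_ll_missing(wq) + 1 entries (the extra one
   *    is the linked-list reserved entry) and release them too.
   */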

For a single ring, single core, default MTU (1500) TCP stream test,
the percentage of pages recycled directly to the cache
(rx_pp_recycle_cached) increases from 31 % to 98 %:

+-------------------------+---------+----------+
| Page Pool stats (/sec)  |  Before |   After  |
+-------------------------+---------+----------+
|rx_pp_alloc_fast         | 2137754 |  2261033 |
|rx_pp_alloc_slow         |      47 |        9 |
|rx_pp_alloc_empty        |      47 |        9 |
|rx_pp_alloc_refill       |   23230 |      819 |
|rx_pp_alloc_waive        |       0 |        0 |
|rx_pp_recycle_cached     |  672182 |  2209015 |
|rx_pp_recycle_cache_full |    1789 |        0 |
|rx_pp_recycle_ring       | 1485848 |    52259 |
|rx_pp_recycle_ring_full  |    3003 |      584 |
+-------------------------+---------+----------+

With this patch, striding RQ performance for the above test is back
to baseline.

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index b621f735cdc356b7af5e6eb5c514402d233e2467..a047a2a4ddacc6354a8ef3e958ee603ae4216003 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -121,9 +121,9 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
 
        mlx5e_reset_icosq_cc_pc(icosq);
 
-       mlx5e_free_rx_in_progress_descs(rq);
+       mlx5e_free_rx_missing_descs(rq);
        if (xskrq)
-               mlx5e_free_rx_in_progress_descs(xskrq);
+               mlx5e_free_rx_missing_descs(xskrq);
 
        clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
        mlx5e_activate_icosq(icosq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index cd7779a9d0463d9c09a41beef78070cd7ef24281..651be7aaf7d514e508e34b1cb9ae8b6b71690c32 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -69,7 +69,7 @@ INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
 void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
-void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
+void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq);
 
 static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index eca9a11454e5d39d7d3ca1dd4978bb279077018e..53eef689f2258aa6b8891c1ffb819ffba99e9fd9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -301,6 +301,15 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
        if (!rq->mpwqe.info)
                return -ENOMEM;
 
+       /* For deferred page release (release right before alloc), make sure
+        * that on first round release is not called.
+        */
+       for (int i = 0; i < wq_sz; i++) {
+               struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, i);
+
+               bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
+       }
+
        mlx5e_build_umr_wqe(rq, rq->icosq, &rq->mpwqe.umr_wqe);
 
        return 0;
@@ -1112,7 +1121,7 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
        return -ETIMEDOUT;
 }
 
-void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
+void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq)
 {
        struct mlx5_wq_ll *wq;
        u16 head;
@@ -1124,8 +1133,12 @@ void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
        wq = &rq->mpwqe.wq;
        head = wq->head;
 
-       /* Outstanding UMR WQEs (in progress) start at wq->head */
-       for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
+       /* Release WQEs that are in missing state: they have been
+        * popped from the list after completion but were not freed
+        * due to deferred release.
+        * Also free the linked-list reserved entry, hence the "+ 1".
+        */
+       for (i = 0; i < mlx5_wq_ll_missing(wq) + 1; i++) {
                rq->dealloc_wqe(rq, head);
                head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
        }
@@ -1152,7 +1165,7 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
        if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
                struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
 
-               mlx5e_free_rx_in_progress_descs(rq);
+               mlx5e_free_rx_missing_descs(rq);
 
                while (!mlx5_wq_ll_is_empty(wq)) {
                        struct mlx5e_rx_wqe_ll *wqe;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index eab8cba33ce4fad4b6fb7717b611b842f66b2ba3..73bc373bf27d1103690526d2efc340d6c5cb2430 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -983,6 +983,11 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
        head = rq->mpwqe.actual_wq_head;
        i = missing;
        do {
+               struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, head);
+
+               /* Deferred free for better page pool cache usage. */
+               mlx5e_free_rx_mpwqe(rq, wi, true);
+
                alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :
                                           mlx5e_alloc_rx_mpwqe(rq, head);
 
@@ -1855,7 +1860,6 @@ mpwrq_cqe_out:
 
        wq  = &rq->mpwqe.wq;
        wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
-       mlx5e_free_rx_mpwqe(rq, wi, true);
        mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
 }
 
@@ -2173,7 +2177,6 @@ mpwrq_cqe_out:
 
        wq  = &rq->mpwqe.wq;
        wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
-       mlx5e_free_rx_mpwqe(rq, wi, true);
        mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
 }
 
@@ -2233,7 +2236,6 @@ mpwrq_cqe_out:
 
        wq  = &rq->mpwqe.wq;
        wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
-       mlx5e_free_rx_mpwqe(rq, wi, true);
        mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
 }