bnxt_en: Refactor bnxt_free_tx_rings() to free per TX ring
Author:    Somnath Kotur <somnath.kotur@broadcom.com>
Date:      Thu, 13 Feb 2025 01:12:33 +0000 (17:12 -0800)
Committer: Jakub Kicinski <kuba@kernel.org>
Date:      Sat, 15 Feb 2025 03:50:22 +0000 (19:50 -0800)
Modify bnxt_free_tx_rings() to free the skbs per TX ring.
This will be useful later in the series.

Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Link: https://patch.msgid.link/20250213011240.1640031-6-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/broadcom/bnxt/bnxt.c

index 52d4dc22275925a60ea102da197b755d4ee87289..453f5264814527d0cb93c9d51b6e80d9ef0cd430 100644 (file)
@@ -3314,74 +3314,81 @@ poll_done:
        return work_done;
 }
 
/* bnxt_free_one_tx_ring_skbs - free all pending TX buffers on one TX ring.
 * @bp:  driver context
 * @txr: the TX ring whose buffers are freed
 * @idx: ring number; rings below bp->tx_nr_rings_xdp may hold redirected
 *       XDP frames instead of skbs
 *
 * Walks every software descriptor slot, unmaps its DMA mapping and frees
 * the attached skb or XDP frame, then resets the BQL/byte-queue state of
 * the corresponding netdev TX queue.
 */
static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
				       struct bnxt_tx_ring_info *txr, int idx)
{
	int i, max_idx;
	struct pci_dev *pdev = bp->pdev;

	max_idx = bp->tx_nr_pages * TX_DESC_CNT;

	for (i = 0; i < max_idx;) {
		struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
		struct sk_buff *skb;
		int j, last;

		/* XDP rings: the slot may hold a redirected frame, which is
		 * returned via xdp_return_frame() rather than kfree_skb().
		 */
		if (idx  < bp->tx_nr_rings_xdp &&
		    tx_buf->action == XDP_REDIRECT) {
			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
			xdp_return_frame(tx_buf->xdpf);
			tx_buf->action = 0;
			tx_buf->xdpf = NULL;
			i++;
			continue;
		}

		skb = tx_buf->skb;
		if (!skb) {
			/* Empty slot, advance one descriptor. */
			i++;
			continue;
		}

		tx_buf->skb = NULL;

		/* Push (inline) TX consumed 2 descriptors and has no DMA
		 * mapping to undo.
		 */
		if (tx_buf->is_push) {
			dev_kfree_skb(skb);
			i += 2;
			continue;
		}

		dma_unmap_single(&pdev->dev,
				 dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb),
				 DMA_TO_DEVICE);

		/* Skip the 2 descriptors used by the skb head, then unmap
		 * one descriptor per fragment, wrapping the ring index with
		 * tx_ring_mask.
		 */
		last = tx_buf->nr_frags;
		i += 2;
		for (j = 0; j < last; j++, i++) {
			int ring_idx = i & bp->tx_ring_mask;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];

			tx_buf = &txr->tx_buf_ring[ring_idx];
			dma_unmap_page(&pdev->dev,
				       dma_unmap_addr(tx_buf, mapping),
				       skb_frag_size(frag), DMA_TO_DEVICE);
		}
		dev_kfree_skb(skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
}
+
+static void bnxt_free_tx_skbs(struct bnxt *bp)
+{
+       int i;
+
+       if (!bp->tx_ring)
+               return;
+
+       for (i = 0; i < bp->tx_nr_rings; i++) {
+               struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+
+               if (!txr->tx_buf_ring)
+                       continue;
+
+               bnxt_free_one_tx_ring_skbs(bp, txr, i);
        }
 }