net: bcmgenet: support reclaiming unsent Tx packets
author		Doug Berger <opendmb@gmail.com>
		Thu, 6 Mar 2025 19:26:39 +0000 (11:26 -0800)
committer	Jakub Kicinski <kuba@kernel.org>
		Sat, 8 Mar 2025 03:33:48 +0000 (19:33 -0800)
When disabling the transmitter, any outstanding packets can now be
reclaimed by bcmgenet_tx_reclaim_all() rather than by the
bcmgenet_fini_dma() function.

Signed-off-by: Doug Berger <opendmb@gmail.com>
Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
Link: https://patch.msgid.link/20250306192643.2383632-12-opendmb@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/broadcom/genet/bcmgenet.c

index 38943bbc35b1470df55272958774c9789eaa51f5..0706c9635689fcb1880fb0eaaa2d83bbdabec8d3 100644 (file)
@@ -1903,12 +1903,39 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 }
 
 static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
-                               struct bcmgenet_tx_ring *ring)
+                               struct bcmgenet_tx_ring *ring,
+                               bool all)
 {
-       unsigned int released;
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct device *kdev = &priv->pdev->dev;
+       unsigned int released, drop, wr_ptr;
+       struct enet_cb *cb_ptr;
+       struct sk_buff *skb;
 
        spin_lock_bh(&ring->lock);
        released = __bcmgenet_tx_reclaim(dev, ring);
+       if (all) {
+               skb = NULL;
+               drop = (ring->prod_index - ring->c_index) & DMA_C_INDEX_MASK;
+               released += drop;
+               ring->prod_index = ring->c_index & DMA_C_INDEX_MASK;
+               while (drop--) {
+                       cb_ptr = bcmgenet_put_txcb(priv, ring);
+                       skb = cb_ptr->skb;
+                       bcmgenet_free_tx_cb(kdev, cb_ptr);
+                       if (skb && cb_ptr == GENET_CB(skb)->first_cb) {
+                               dev_consume_skb_any(skb);
+                               skb = NULL;
+                       }
+               }
+               if (skb)
+                       dev_consume_skb_any(skb);
+               bcmgenet_tdma_ring_writel(priv, ring->index,
+                                         ring->prod_index, TDMA_PROD_INDEX);
+               wr_ptr = ring->write_ptr * WORDS_PER_BD(priv);
+               bcmgenet_tdma_ring_writel(priv, ring->index, wr_ptr,
+                                         TDMA_WRITE_PTR);
+       }
        spin_unlock_bh(&ring->lock);
 
        return released;
@@ -1945,7 +1972,7 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
        int i = 0;
 
        do {
-               bcmgenet_tx_reclaim(dev, &priv->tx_rings[i++]);
+               bcmgenet_tx_reclaim(dev, &priv->tx_rings[i++], true);
        } while (i <= priv->hw_params->tx_queues && netif_is_multiqueue(dev));
 }
 
@@ -2921,10 +2948,6 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
        bcmgenet_fini_rx_napi(priv);
        bcmgenet_fini_tx_napi(priv);
 
-       for (i = 0; i < priv->num_tx_bds; i++)
-               dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
-                                                 priv->tx_cbs + i));
-
        for (i = 0; i <= priv->hw_params->tx_queues; i++) {
                txq = netdev_get_tx_queue(priv->dev, i);
                netdev_tx_reset_queue(txq);
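
For reference, below is a minimal standalone sketch of the ring-index
arithmetic the new reclaim-all path relies on (the masked subtraction that
counts descriptors still owned by hardware, and the producer-index reset).
This is a userspace model for illustration only, not driver code; it assumes
DMA_C_INDEX_MASK is the 16-bit mask (0xffff) applied to the hardware ring
indices, as in the driver headers.

/*
 * Standalone model of the descriptor accounting in the hunk above.
 * Not driver code; DMA_C_INDEX_MASK is assumed to be 0xffff here.
 */
#include <assert.h>
#include <stdio.h>

#define DMA_C_INDEX_MASK 0xffff

/* Number of descriptors queued to hardware but not yet consumed. */
static unsigned int outstanding(unsigned int prod_index, unsigned int c_index)
{
	return (prod_index - c_index) & DMA_C_INDEX_MASK;
}

int main(void)
{
	/* Simple case: producer is three descriptors ahead of the consumer. */
	assert(outstanding(0x0005, 0x0002) == 3);

	/*
	 * Wraparound case: the mask keeps the count correct after the
	 * 16-bit producer index rolls over past the consumer index.
	 */
	assert(outstanding(0x0001, 0xfffe) == 3);

	/*
	 * Resetting prod_index to the masked c_index, as the patch does
	 * before rewriting TDMA_PROD_INDEX, leaves nothing outstanding.
	 */
	assert(outstanding(0x0002 & DMA_C_INDEX_MASK, 0x0002) == 0);

	printf("ring index arithmetic checks passed\n");
	return 0;
}

Each assert mirrors a case the masked subtraction at the top of the new
"if (all)" block has to handle before the loop drops the unsent descriptors.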