net: xilinx: axienet: Fix RX skb ring management in DMAengine mode
author Suraj Gupta <suraj.gupta2@amd.com>
Wed, 13 Aug 2025 13:55:59 +0000 (19:25 +0530)
committer Jakub Kicinski <kuba@kernel.org>
Fri, 15 Aug 2025 00:38:44 +0000 (17:38 -0700)
Submit multiple descriptors in axienet_rx_cb() to fill Rx skb ring. This
ensures the ring "catches up" on previously missed allocations.

Increment Rx skb ring head pointer after BD is successfully allocated.
Previously, head pointer was incremented before verifying if descriptor is
successfully allocated and has valid entries, which could lead to ring
state inconsistency if descriptor setup failed.

These changes improve reliability by maintaining adequate descriptor
availability and ensuring proper ring buffer state management.

Fixes: 6a91b846af85 ("net: axienet: Introduce dmaengine support")
Signed-off-by: Suraj Gupta <suraj.gupta2@amd.com>
Link: https://patch.msgid.link/20250813135559.1555652-1-suraj.gupta2@amd.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/xilinx/xilinx_axienet_main.c

index 6011d7eae0c78a4557e6e34fbb448dffea9f195a..0d8a05fe541afb584ef9e3c5ee3dfbb5da27d70c 100644 (file)
@@ -1160,6 +1160,7 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
        struct axienet_local *lp = data;
        struct sk_buff *skb;
        u32 *app_metadata;
+       int i;
 
        skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
        skb = skbuf_dma->skb;
@@ -1178,7 +1179,10 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
        u64_stats_add(&lp->rx_packets, 1);
        u64_stats_add(&lp->rx_bytes, rx_len);
        u64_stats_update_end(&lp->rx_stat_sync);
-       axienet_rx_submit_desc(lp->ndev);
+
+       for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail,
+                                  RX_BUF_NUM_DEFAULT); i++)
+               axienet_rx_submit_desc(lp->ndev);
        dma_async_issue_pending(lp->rx_chan);
 }
 
@@ -1457,7 +1461,6 @@ static void axienet_rx_submit_desc(struct net_device *ndev)
        if (!skbuf_dma)
                return;
 
-       lp->rx_ring_head++;
        skb = netdev_alloc_skb(ndev, lp->max_frm_size);
        if (!skb)
                return;
@@ -1482,6 +1485,7 @@ static void axienet_rx_submit_desc(struct net_device *ndev)
        skbuf_dma->desc = dma_rx_desc;
        dma_rx_desc->callback_param = lp;
        dma_rx_desc->callback_result = axienet_dma_rx_cb;
+       lp->rx_ring_head++;
        dmaengine_submit(dma_rx_desc);
 
        return;