wifi: mt76: do not hold queue lock during initial rx buffer alloc
author Quan Zhou <quan.zhou@mediatek.com>
Fri, 8 Nov 2024 12:59:40 +0000 (20:59 +0800)
committer Felix Fietkau <nbd@nbd.name>
Tue, 14 Jan 2025 12:34:35 +0000 (13:34 +0100)
During DMA init or reset, all rx rings need to be completely refilled
with buffers. Since this is very time consuming, split the fill function
so that the initial allocation runs without holding the queue spinlock.
This avoids excessive scheduler latency.
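
The change follows a common lock-split pattern: the allocation loop
moves into an unlocked helper, and the old entry point becomes a thin
locked wrapper around it. Callers that own the queue exclusively, such
as the init and reset paths here, call the helper directly so the bulk
allocation never runs under the spinlock. A minimal sketch of the
pattern, using hypothetical names (demo_queue, demo_rx_fill_buf,
demo_rx_fill) rather than the real mt76 structures:

    #include <linux/spinlock.h>

    struct demo_queue {
            spinlock_t lock;
            int queued;
            int ndesc;
    };

    /* Slow allocation loop; safe to call without the lock only while
     * the queue is not yet live (init) or is quiesced (reset). */
    static int demo_rx_fill_buf(struct demo_queue *q)
    {
            int frames = 0;

            while (q->queued < q->ndesc - 1) {
                    /* allocate and attach one rx buffer here */
                    q->queued++;
                    frames++;
            }

            return frames;
    }

    /* Locked wrapper for the normal refill path, where the queue is
     * live and concurrently accessed. */
    static int demo_rx_fill(struct demo_queue *q)
    {
            int frames;

            spin_lock_bh(&q->lock);
            frames = demo_rx_fill_buf(q);
            spin_unlock_bh(&q->lock);

            return frames;
    }

In the patch below, mt76_dma_rx_reset() and mt76_dma_init() switch to
the unlocked mt76_dma_rx_fill_buf(), while mt76_dma_rx_fill() keeps
the locking for runtime refills.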

Signed-off-by: Quan Zhou <quan.zhou@mediatek.com>
Reviewed-by: Shayne Chen <shayne.chen@mediatek.com>
Reviewed-by: Deren Wu <deren.wu@mediatek.com>
Link: https://patch.msgid.link/57c68a7ce1dd9022fa5e06af2c53d6313f30ec83.1731069062.git.quan.zhou@mediatek.com
Signed-off-by: Felix Fietkau <nbd@nbd.name>
drivers/net/wireless/mediatek/mt76/dma.c

index 5f46d6daeaa7c531bf3952813c8f887ce013c649..844af16ee551313bc2b6f5c43cd89d096cf46e49 100644
@@ -631,7 +631,8 @@ free_skb:
        return ret;
 }
 
-int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+static int
+mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
                     bool allow_direct)
 {
        int len = SKB_WITH_OVERHEAD(q->buf_size);
@@ -640,8 +641,6 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
        if (!q->ndesc)
                return 0;
 
-       spin_lock_bh(&q->lock);
-
        while (q->queued < q->ndesc - 1) {
                struct mt76_queue_buf qbuf = {};
                enum dma_data_direction dir;
@@ -674,6 +673,19 @@ done:
        if (frames || mt76_queue_is_wed_rx(q))
                mt76_dma_kick_queue(dev, q);
 
+       return frames;
+}
+
+int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+                    bool allow_direct)
+{
+       int frames;
+
+       if (!q->ndesc)
+               return 0;
+
+       spin_lock_bh(&q->lock);
+       frames = mt76_dma_rx_fill_buf(dev, q, allow_direct);
        spin_unlock_bh(&q->lock);
 
        return frames;
@@ -796,7 +808,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
                return;
 
        mt76_dma_sync_idx(dev, q);
-       mt76_dma_rx_fill(dev, q, false);
+       mt76_dma_rx_fill_buf(dev, q, false);
 }
 
 static void
@@ -969,7 +981,7 @@ mt76_dma_init(struct mt76_dev *dev,
 
        mt76_for_each_q_rx(dev, i) {
                netif_napi_add(dev->napi_dev, &dev->napi[i], poll);
-               mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+               mt76_dma_rx_fill_buf(dev, &dev->q_rx[i], false);
                napi_enable(&dev->napi[i]);
        }