net: calxedaxgmac: rework transmit ring handling
authorRob Herring <rob.herring@calxeda.com>
Mon, 5 Nov 2012 06:22:23 +0000 (06:22 +0000)
committerDavid S. Miller <davem@davemloft.net>
Wed, 7 Nov 2012 08:51:14 +0000 (03:51 -0500)
Only generate tx interrupts once every (ring size / 4) descriptors, instead
of on every transmitted frame. Move the netif_stop_queue call to the end of
the xmit function, stopping the queue when the ring is nearly full there
rather than checking for space at the beginning.

Signed-off-by: Rob Herring <rob.herring@calxeda.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/calxeda/xgmac.c

index 826321910efc481ff39ac4fb3cf03a0ac5ea1d96..362b35ed850b1fd9f0365e3cc9e3ed00e0f4aa92 100644 (file)
 #define DMA_INTR_ENA_TIE       0x00000001      /* Transmit Interrupt */
 
 #define DMA_INTR_NORMAL                (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
-                                DMA_INTR_ENA_TUE)
+                                DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE)
 
 #define DMA_INTR_ABNORMAL      (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
                                 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
@@ -374,6 +374,7 @@ struct xgmac_priv {
        struct sk_buff **tx_skbuff;
        unsigned int tx_head;
        unsigned int tx_tail;
+       int tx_irq_cnt;
 
        void __iomem *base;
        unsigned int dma_buf_sz;
@@ -886,7 +887,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
        }
 
        if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
-           TX_THRESH)
+           MAX_SKB_FRAGS)
                netif_wake_queue(priv->dev);
 }
 
@@ -1057,19 +1058,15 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
        struct xgmac_priv *priv = netdev_priv(dev);
        unsigned int entry;
        int i;
+       u32 irq_flag;
        int nfrags = skb_shinfo(skb)->nr_frags;
        struct xgmac_dma_desc *desc, *first;
        unsigned int desc_flags;
        unsigned int len;
        dma_addr_t paddr;
 
-       if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
-           (nfrags + 1)) {
-               writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE,
-                       priv->base + XGMAC_DMA_INTR_ENA);
-               netif_stop_queue(dev);
-               return NETDEV_TX_BUSY;
-       }
+       priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
+       irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;
 
        desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
                TXDESC_CSUM_ALL : 0;
@@ -1110,9 +1107,9 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Interrupt on completition only for the latest segment */
        if (desc != first)
                desc_set_tx_owner(desc, desc_flags |
-                       TXDESC_LAST_SEG | TXDESC_INTERRUPT);
+                       TXDESC_LAST_SEG | irq_flag);
        else
-               desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT;
+               desc_flags |= TXDESC_LAST_SEG | irq_flag;
 
        /* Set owner on first desc last to avoid race condition */
        wmb();
@@ -1121,6 +1118,9 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
        priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
 
        writel(1, priv->base + XGMAC_DMA_TX_POLL);
+       if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
+           MAX_SKB_FRAGS)
+               netif_stop_queue(dev);
 
        return NETDEV_TX_OK;
 }
@@ -1397,7 +1397,7 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
        }
 
        /* TX/RX NORMAL interrupts */
-       if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) {
+       if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) {
                __raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
                napi_schedule(&priv->napi);
        }