IB/ipoib: Get rid of the tx_outstanding variable in all modes
author Erez Shitrit <erezsh@mellanox.com>
Thu, 19 Oct 2017 04:56:42 +0000 (07:56 +0300)
committer Doug Ledford <dledford@redhat.com>
Wed, 25 Oct 2017 17:36:50 +0000 (13:36 -0400)
The first step toward using NAPI in the UD/TX flow is to separate
the two flows, NAPI and xmit, so that no variables are shared
between them.

This patch removes the tx_outstanding variable, which was updated in
both flows, and instead uses the two cyclic ring counters tx_head and
tx_tail: tx_head is advanced in the xmit flow and tx_tail in the NAPI
flow, so the number of outstanding sends is always tx_head - tx_tail.
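As a standalone illustration (hypothetical names, not the driver code), the accounting works because both counters are free-running unsigned integers, so head - tail stays correct modulo 2^32 even after the counters wrap:

#include <stdio.h>
#include <limits.h>

/* Sketch only: two free-running unsigned counters, as with
 * priv->tx_head (xmit flow) and priv->tx_tail (NAPI flow).
 */
int main(void)
{
	unsigned int head = UINT_MAX;     /* about to wrap */
	unsigned int tail = UINT_MAX - 3; /* 3 sends outstanding */

	head += 2; /* two more posts: head wraps around to 1 */

	/* unsigned subtraction is modulo 2^32, so the count stays right */
	printf("outstanding = %u\n", head - tail); /* prints 5 */
	return 0;
}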

Cc: Kamal Heib <kamalh@mellanox.com>
Signed-off-by: Erez Shitrit <erezsh@mellanox.com>
Reviewed-by: Alex Vesker <valex@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c

index 7cc2b755413d25e8d17589e1079b9ff179068c17..19c3ba2368d2ad5e7f37c3522996dbcd6b71a6c4 100644 (file)
@@ -381,7 +381,6 @@ struct ipoib_dev_priv {
        unsigned             tx_tail;
        struct ib_sge        tx_sge[MAX_SKB_FRAGS + 1];
        struct ib_ud_wr      tx_wr;
-       unsigned             tx_outstanding;
        struct ib_wc         send_wc[MAX_SEND_CQE];
 
        struct ib_recv_wr    rx_wr;
index 7500c28eac6b2f11a154ccf118926fd84b2023ec..6e0fc592791e97ee35cf77b48b9e1c8fde76d503 100644 (file)
@@ -769,8 +769,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
        } else {
                netif_trans_update(dev);
                ++tx->tx_head;
-
-               if (++priv->tx_outstanding == ipoib_sendq_size) {
+               ++priv->tx_head;
+               if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size) {
                        ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
                                  tx->qp->qp_num);
                        netif_stop_queue(dev);
@@ -814,7 +814,8 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
        netif_tx_lock(dev);
 
        ++tx->tx_tail;
-       if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
+       ++priv->tx_tail;
+       if (unlikely((priv->tx_head - priv->tx_tail) == ipoib_sendq_size >> 1) &&
            netif_queue_stopped(dev) &&
            test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                netif_wake_queue(dev);
@@ -1220,8 +1221,9 @@ timeout:
                ipoib_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(tx_req->skb);
                ++p->tx_tail;
+               ++priv->tx_tail;
                netif_tx_lock_bh(p->dev);
-               if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
+               if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) &&
                    netif_queue_stopped(p->dev) &&
                    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                        netif_wake_queue(p->dev);
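The two wake-ups above (the completion path and the timeout-drain path) fire only when the outstanding count falls back to exactly half the ring, ipoib_sendq_size >> 1, which gives stop/wake hysteresis: the queue wakes once per drain rather than on every completion. A minimal standalone sketch of the pattern (hypothetical names, not the driver code):

#include <stdio.h>
#include <stdbool.h>

#define SENDQ_SIZE 8U /* stands in for ipoib_sendq_size */

int main(void)
{
	unsigned int head = 0, tail = 0;
	bool stopped = false;

	/* xmit flow: post sends until the ring is full, then stop */
	while (head - tail < SENDQ_SIZE)
		head++;
	stopped = true;
	printf("ring full at %u outstanding, queue stopped\n", head - tail);

	/* completion flow: drain; wake exactly once, at half capacity */
	while (tail != head) {
		tail++;
		if (head - tail == SENDQ_SIZE >> 1 && stopped) {
			stopped = false;
			printf("drained to %u outstanding, queue woken\n",
			       head - tail);
		}
	}
	return 0;
}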
index 89d82e27a445cca4631b46e881ee7eb2d908c9d9..c978f8ffd2bbc65183eb08c353ec7d76cdaf83ef 100644 (file)
@@ -406,7 +406,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
        dev_kfree_skb_any(tx_req->skb);
 
        ++priv->tx_tail;
-       if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
+       if (unlikely((priv->tx_head - priv->tx_tail) == ipoib_sendq_size >> 1) &&
            netif_queue_stopped(dev) &&
            test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                netif_wake_queue(dev);
@@ -611,8 +611,8 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
                priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
        else
                priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
-
-       if (++priv->tx_outstanding == ipoib_sendq_size) {
+       /* tx_head is advanced only after a successful post; test for the last free slot */
+       if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) {
                ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
                if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
                        ipoib_warn(priv, "request notify on send CQ failed\n");
@@ -627,7 +627,6 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
        if (unlikely(rc)) {
                ipoib_warn(priv, "post_send failed, error %d\n", rc);
                ++dev->stats.tx_errors;
-               --priv->tx_outstanding;
                ipoib_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(skb);
                if (netif_queue_stopped(dev))
@@ -640,7 +639,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
                ++priv->tx_head;
        }
 
-       if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
+       if (unlikely(priv->tx_head - priv->tx_tail > MAX_SEND_CQE))
                while (poll_tx(priv))
                        ; /* nothing */
 
@@ -773,7 +772,6 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
                                ipoib_dma_unmap_tx(priv, tx_req);
                                dev_kfree_skb_any(tx_req->skb);
                                ++priv->tx_tail;
-                               --priv->tx_outstanding;
                        }
 
                        for (i = 0; i < ipoib_recvq_size; ++i) {
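Note the intentional off-by-one between the two xmit paths: ipoib_cm_send() advances priv->tx_head before testing for a full ring, so it compares against ipoib_sendq_size, while ipoib_send() tests before tx_head is advanced (the increment happens only after a successful post), so it compares against ipoib_sendq_size - 1. Both stop the queue at the same moment, when the send in flight takes the last free slot. A standalone sketch of the UD-side ordering (hypothetical names, not the driver code):

#include <stdio.h>

#define SENDQ_SIZE 4U /* stands in for ipoib_sendq_size */

int main(void)
{
	unsigned int head = 0, tail = 0;
	int i;

	for (i = 0; i < 4; i++) {
		/* test before advancing: sendq_size - 1 outstanding means
		 * the post about to be made takes the last free slot
		 */
		if (head - tail == SENDQ_SIZE - 1)
			printf("post %d fills the ring: stop the queue\n", i);
		head++; /* advanced only after a successful post */
	}
	printf("outstanding = %u (ring full)\n", head - tail);
	return 0;
}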