r8169: use new macro netif_subqueue_maybe_stop in rtl8169_start_xmit
author    Heiner Kallweit <hkallweit1@gmail.com>
Mon, 17 Apr 2023 09:37:20 +0000 (11:37 +0200)
committer Paolo Abeni <pabeni@redhat.com>
Tue, 18 Apr 2023 10:59:01 +0000 (12:59 +0200)
Use new net core macro netif_subqueue_maybe_stop in the start_xmit path
to simplify the code. Whilst at it, set the tx queue start threshold to
twice the stop threshold. Previously both values were the same, which
resulted in stopping and starting the queue more often than needed.
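
Roughly, netif_subqueue_maybe_stop() wraps the same stop / re-check /
restart sequence the driver used to open-code, but with separate stop
and start thresholds. A simplified sketch of the behaviour (illustrative
only, not the actual macro expansion from <net/netdev_queues.h>; the
function and parameter names below are made up for the example, and
'descs' stands for the caller-supplied expression, here
rtl_tx_slots_avail(tp), which the real macro re-evaluates after
stopping the queue):

	static int maybe_stop_sketch(struct net_device *dev, unsigned int descs,
				     unsigned int stop_thrs, unsigned int start_thrs)
	{
		if (likely(descs >= stop_thrs))
			return 1;		/* enough room, queue keeps running */

		netif_stop_subqueue(dev, 0);	/* stop queue 0 */
		smp_mb__after_atomic();		/* publish the stop before re-checking */

		if (descs >= start_thrs) {	/* a racing completion freed slots */
			netif_start_subqueue(dev, 0);
			return -1;		/* stopped but restarted */
		}
		return 0;			/* queue remains stopped */
	}

With R8169_TX_START_THRS set to twice R8169_TX_STOP_THRS, rtl_tx() only
wakes the queue once at least 2 * (MAX_SKB_FRAGS + 1) descriptors are
free again, giving some hysteresis instead of toggling the queue state
on every freed packet.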

Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
drivers/net/ethernet/realtek/r8169_main.c

index 9f8357bbc8a487a4d2e9dd0510549ce020566cf6..fff44d46bce148732b65cdbf8f807a90c0cc9c2b 100644
@@ -30,6 +30,7 @@
 #include <linux/ipv6.h>
 #include <asm/unaligned.h>
 #include <net/ip6_checksum.h>
+#include <net/netdev_queues.h>
 
 #include "r8169.h"
 #include "r8169_firmware.h"
@@ -68,6 +69,8 @@
 #define NUM_RX_DESC    256     /* Number of Rx descriptor registers */
 #define R8169_TX_RING_BYTES    (NUM_TX_DESC * sizeof(struct TxDesc))
 #define R8169_RX_RING_BYTES    (NUM_RX_DESC * sizeof(struct RxDesc))
+#define R8169_TX_STOP_THRS     (MAX_SKB_FRAGS + 1)
+#define R8169_TX_START_THRS    (2 * R8169_TX_STOP_THRS)
 
 #define OCP_STD_PHY_BASE       0xa400
 
@@ -4162,13 +4165,9 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
        return true;
 }
 
-static bool rtl_tx_slots_avail(struct rtl8169_private *tp)
+static unsigned int rtl_tx_slots_avail(struct rtl8169_private *tp)
 {
-       unsigned int slots_avail = READ_ONCE(tp->dirty_tx) + NUM_TX_DESC
-                                       - READ_ONCE(tp->cur_tx);
-
-       /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
-       return slots_avail > MAX_SKB_FRAGS;
+       return READ_ONCE(tp->dirty_tx) + NUM_TX_DESC - READ_ONCE(tp->cur_tx);
 }
 
 /* Versions RTL8102e and from RTL8168c onwards support csum_v2 */
@@ -4245,27 +4244,10 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
        WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1);
 
-       stop_queue = !rtl_tx_slots_avail(tp);
-       if (unlikely(stop_queue)) {
-               /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
-                * not miss a ring update when it notices a stopped queue.
-                */
-               smp_wmb();
-               netif_stop_queue(dev);
-               /* Sync with rtl_tx:
-                * - publish queue status and cur_tx ring index (write barrier)
-                * - refresh dirty_tx ring index (read barrier).
-                * May the current thread have a pessimistic view of the ring
-                * status and forget to wake up queue, a racing rtl_tx thread
-                * can't.
-                */
-               smp_mb__after_atomic();
-               if (rtl_tx_slots_avail(tp))
-                       netif_start_queue(dev);
-               door_bell = true;
-       }
-
-       if (door_bell)
+       stop_queue = !netif_subqueue_maybe_stop(dev, 0, rtl_tx_slots_avail(tp),
+                                               R8169_TX_STOP_THRS,
+                                               R8169_TX_START_THRS);
+       if (door_bell || stop_queue)
                rtl8169_doorbell(tp);
 
        return NETDEV_TX_OK;
@@ -4400,7 +4382,8 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
                 * ring status.
                 */
                smp_store_mb(tp->dirty_tx, dirty_tx);
-               if (netif_queue_stopped(dev) && rtl_tx_slots_avail(tp))
+               if (netif_queue_stopped(dev) &&
+                   rtl_tx_slots_avail(tp) >= R8169_TX_START_THRS)
                        netif_wake_queue(dev);
                /*
                 * 8168 hack: TxPoll requests are lost when the Tx packets are