netpoll: hold rcu read lock in __netpoll_send_skb()
author: Breno Leitao <leitao@debian.org>
date: Thu, 6 Mar 2025 13:16:18 +0000 (05:16 -0800)
committer: Jakub Kicinski <kuba@kernel.org>
date: Sat, 8 Mar 2025 03:57:33 +0000 (19:57 -0800)
The function __netpoll_send_skb() is being invoked without holding the
RCU read lock. This oversight triggers a warning message when
CONFIG_PROVE_RCU_LIST is enabled:

net/core/netpoll.c:330 suspicious rcu_dereference_check() usage!

 netpoll_send_skb
 netpoll_send_udp
 write_ext_msg
 console_flush_all
 console_unlock
 vprintk_emit

To prevent npinfo from disappearing unexpectedly, ensure that
__netpoll_send_skb() is protected with the RCU read lock.

Fixes: 2899656b494dcd1 ("netpoll: take rcu_read_lock_bh() in netpoll_send_skb_on_dev()")
Signed-off-by: Breno Leitao <leitao@debian.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20250306-netpoll_rcu_v2-v2-1-bc4f5c51742a@debian.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/core/netpoll.c

index 62b4041aae1ae8c7dc47c89fb40b14bbd4ad0e0e..0ab722d95a2df68e2fa40bd3546cb3fa3bb6ee6a 100644 (file)
@@ -319,6 +319,7 @@ static int netpoll_owner_active(struct net_device *dev)
 static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
        netdev_tx_t status = NETDEV_TX_BUSY;
+       netdev_tx_t ret = NET_XMIT_DROP;
        struct net_device *dev;
        unsigned long tries;
        /* It is up to the caller to keep npinfo alive. */
@@ -327,11 +328,12 @@ static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
        lockdep_assert_irqs_disabled();
 
        dev = np->dev;
+       rcu_read_lock();
        npinfo = rcu_dereference_bh(dev->npinfo);
 
        if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
                dev_kfree_skb_irq(skb);
-               return NET_XMIT_DROP;
+               goto out;
        }
 
        /* don't get messages out of order, and no recursion */
@@ -370,7 +372,10 @@ static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
                skb_queue_tail(&npinfo->txq, skb);
                schedule_delayed_work(&npinfo->tx_work,0);
        }
-       return NETDEV_TX_OK;
+       ret = NETDEV_TX_OK;
+out:
+       rcu_read_unlock();
+       return ret;
 }
 
 netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)