net: hv_netvsc: fix loss of early receive events from host during channel open.
author Dipayaan Roy <dipayanroy@linux.microsoft.com>
Mon, 25 Aug 2025 11:56:27 +0000 (04:56 -0700)
committer Jakub Kicinski <kuba@kernel.org>
Wed, 27 Aug 2025 01:15:19 +0000 (18:15 -0700)
The hv_netvsc driver currently enables NAPI after opening the primary and
subchannels. This ordering creates a race: if the Hyper-V host places data
in the host -> guest ring buffer and signals the channel before
napi_enable() has been called, the channel callback runs but
napi_schedule_prep() returns false. As a result, the NAPI poller is never
scheduled, the data in the ring buffer is not consumed, and the receive
queue can remain stuck indefinitely, until another interrupt happens to
arrive.
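
For reference, the interrupt path in question looks roughly like the
condensed sketch below (based on netvsc_channel_cb() in
drivers/net/hyperv/netvsc.c; descriptor prefetching and other details
are omitted):

    static void netvsc_channel_cb(void *context)
    {
            struct netvsc_channel *nvchan = context;

            /* Returns false while NAPI is still disabled, so an early
             * host signal is silently dropped here.
             */
            if (napi_schedule_prep(&nvchan->napi)) {
                    /* Mask host interrupts and run the poller. */
                    hv_begin_read(&nvchan->channel->inbound);
                    __napi_schedule_irqoff(&nvchan->napi);
            }
    }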

Fix this by enabling NAPI and registering it with the RX/TX queues before
the vmbus channel is opened. This ensures that any early host signal after
open correctly triggers NAPI scheduling and the ring buffer is drained.
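
After this change, the primary-channel open path in netvsc_device_add()
is ordered as sketched below (error handling and unrelated setup elided):

    netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);

    /* NAPI must be live before the host can signal the channel. */
    napi_enable(&net_device->chan_table[0].napi);
    netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX,
                         &net_device->chan_table[0].napi);
    netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX,
                         &net_device->chan_table[0].napi);

    /* The host may place data and signal as soon as the channel opens. */
    ret = vmbus_open(device->channel, netvsc_ring_bytes, netvsc_ring_bytes,
                     NULL, 0, netvsc_channel_cb, net_device->chan_table);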

Fixes: 76bb5db5c749d ("netvsc: fix use after free on module removal")
Signed-off-by: Dipayaan Roy <dipayanroy@linux.microsoft.com>
Link: https://patch.msgid.link/20250825115627.GA32189@linuxonhyperv3.guj3yctzbm1etfxqx2vob5hsef.xx.internal.cloudapp.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/rndis_filter.c

index 720104661d7f24b03a756360d10065d964707233..60a4629fe6ba7a288cea4be38cb394ecbb6509a1 100644
@@ -1812,6 +1812,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 
        /* Enable NAPI handler before init callbacks */
        netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
+       napi_enable(&net_device->chan_table[0].napi);
+       netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX,
+                            &net_device->chan_table[0].napi);
+       netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX,
+                            &net_device->chan_table[0].napi);
 
        /* Open the channel */
        device->channel->next_request_id_callback = vmbus_next_request_id;
@@ -1831,12 +1836,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
        /* Channel is opened */
        netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
 
-       napi_enable(&net_device->chan_table[0].napi);
-       netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX,
-                            &net_device->chan_table[0].napi);
-       netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX,
-                            &net_device->chan_table[0].napi);
-
        /* Connect with the NetVsp */
        ret = netvsc_connect_vsp(device, net_device, device_info);
        if (ret != 0) {
@@ -1854,14 +1853,14 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 
 close:
        RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
-       netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
-       netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
-       napi_disable(&net_device->chan_table[0].napi);
 
        /* Now, we can close the channel safely */
        vmbus_close(device->channel);
 
 cleanup:
+       netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
+       netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
+       napi_disable(&net_device->chan_table[0].napi);
        netif_napi_del(&net_device->chan_table[0].napi);
 
 cleanup2:
index 9e73959e61ee0b953fd904bb92a1795e6ababe6a..c35f9685b6bf04e484711388b0c07ad58f2cad87 100644
@@ -1252,17 +1252,26 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
        new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
        new_sc->max_pkt_size = NETVSC_MAX_PKT_SIZE;
 
+       /* Enable NAPI before opening the vmbus channel to avoid a race:
+        * data that the host places on the host->guest ring would
+        * otherwise go unprocessed if NAPI were not yet enabled.
+        */
+       napi_enable(&nvchan->napi);
+       netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
+                            &nvchan->napi);
+       netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
+                            &nvchan->napi);
+
        ret = vmbus_open(new_sc, netvsc_ring_bytes,
                         netvsc_ring_bytes, NULL, 0,
                         netvsc_channel_cb, nvchan);
-       if (ret == 0) {
-               napi_enable(&nvchan->napi);
-               netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
-                                    &nvchan->napi);
-               netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
-                                    &nvchan->napi);
-       } else {
+       if (ret != 0) {
                netdev_notice(ndev, "sub channel open failed: %d\n", ret);
+               netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
+                                    NULL);
+               netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
+                                    NULL);
+               napi_disable(&nvchan->napi);
        }
 
        if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
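
Note the matching unwind in the subchannel error path: if vmbus_open()
fails, the TX and RX queue associations are cleared and napi_disable() is
called, mirroring the setup done before the open in reverse order. The
primary-channel path gets the same treatment by moving its teardown from
the close: label to the cleanup: label, so that a failure of vmbus_open()
itself (which jumps to cleanup:) also disables the now-early-enabled NAPI
instance.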