virtio-net: Refactor napi_enable paths
author: Joe Damato <jdamato@fastly.com>
Fri, 7 Mar 2025 01:12:09 +0000 (01:12 +0000)
committer: Jakub Kicinski <kuba@kernel.org>
Mon, 10 Mar 2025 20:09:21 +0000 (13:09 -0700)
Refactor virtnet_napi_enable and virtnet_napi_tx_enable to take a struct
receive_queue. Create a helper, virtnet_napi_do_enable, which contains
the logic to enable a NAPI.

Signed-off-by: Joe Damato <jdamato@fastly.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Tested-by: Lei Yang <leiyang@redhat.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Link: https://patch.msgid.link/20250307011215.266806-2-jdamato@fastly.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/virtio_net.c

index ac26a6201c4449e0894f224e5e6542a55cecad5c..133b004c7a9a9961754236e8061b70889b611973 100644 (file)
@@ -2807,7 +2807,8 @@ static void skb_recv_done(struct virtqueue *rvq)
        virtqueue_napi_schedule(&rq->napi, rvq);
 }
 
-static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
+static void virtnet_napi_do_enable(struct virtqueue *vq,
+                                  struct napi_struct *napi)
 {
        napi_enable(napi);
 
@@ -2820,10 +2821,16 @@ static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
        local_bh_enable();
 }
 
-static void virtnet_napi_tx_enable(struct virtnet_info *vi,
-                                  struct virtqueue *vq,
-                                  struct napi_struct *napi)
+static void virtnet_napi_enable(struct receive_queue *rq)
 {
+       virtnet_napi_do_enable(rq->vq, &rq->napi);
+}
+
+static void virtnet_napi_tx_enable(struct send_queue *sq)
+{
+       struct virtnet_info *vi = sq->vq->vdev->priv;
+       struct napi_struct *napi = &sq->napi;
+
        if (!napi->weight)
                return;
 
@@ -2835,7 +2842,7 @@ static void virtnet_napi_tx_enable(struct virtnet_info *vi,
                return;
        }
 
-       return virtnet_napi_enable(vq, napi);
+       virtnet_napi_do_enable(sq->vq, napi);
 }
 
 static void virtnet_napi_tx_disable(struct napi_struct *napi)
@@ -2856,7 +2863,7 @@ static void refill_work(struct work_struct *work)
 
                napi_disable(&rq->napi);
                still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
-               virtnet_napi_enable(rq->vq, &rq->napi);
+               virtnet_napi_enable(rq);
 
                /* In theory, this can happen: if we don't get any buffers in
                 * we will *never* try to fill again.
@@ -3073,8 +3080,8 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
        if (err < 0)
                goto err_xdp_reg_mem_model;
 
-       virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
-       virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
+       virtnet_napi_enable(&vi->rq[qp_index]);
+       virtnet_napi_tx_enable(&vi->sq[qp_index]);
 
        return 0;
 
@@ -3339,7 +3346,7 @@ static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
                schedule_delayed_work(&vi->refill, 0);
 
        if (running)
-               virtnet_napi_enable(rq->vq, &rq->napi);
+               virtnet_napi_enable(rq);
 }
 
 static int virtnet_rx_resize(struct virtnet_info *vi,
@@ -3402,7 +3409,7 @@ static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq)
        __netif_tx_unlock_bh(txq);
 
        if (running)
-               virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+               virtnet_napi_tx_enable(sq);
 }
 
 static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
@@ -5983,9 +5990,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
                if (old_prog)
                        bpf_prog_put(old_prog);
                if (netif_running(dev)) {
-                       virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
-                       virtnet_napi_tx_enable(vi, vi->sq[i].vq,
-                                              &vi->sq[i].napi);
+                       virtnet_napi_enable(&vi->rq[i]);
+                       virtnet_napi_tx_enable(&vi->sq[i]);
                }
        }
 
@@ -6000,9 +6006,8 @@ err:
 
        if (netif_running(dev)) {
                for (i = 0; i < vi->max_queue_pairs; i++) {
-                       virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
-                       virtnet_napi_tx_enable(vi, vi->sq[i].vq,
-                                              &vi->sq[i].napi);
+                       virtnet_napi_enable(&vi->rq[i]);
+                       virtnet_napi_tx_enable(&vi->sq[i]);
                }
        }
        if (prog)