- virtnet_napi_do_enable(vq, napi);
+ virtnet_napi_do_enable(sq->vq, napi);
}
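
+/* Take the send queue rather than a bare napi_struct; the helper
+ * derives the NAPI instance from the queue itself.
+ */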
-static void virtnet_napi_tx_disable(struct napi_struct *napi)
+static void virtnet_napi_tx_disable(struct send_queue *sq)
{
+ struct napi_struct *napi = &sq->napi;
+
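+ /* TX NAPI is optional; a zero weight means it was never enabled */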
if (napi->weight)
napi_disable(napi);
}
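
+/* RX counterpart of virtnet_napi_tx_disable(): RX NAPI is always
+ * active, so it is disabled unconditionally.
+ */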
+static void virtnet_napi_disable(struct receive_queue *rq)
+{
+ struct napi_struct *napi = &rq->napi;
+
+ napi_disable(napi);
+}
+
static void refill_work(struct work_struct *work)
{
struct virtnet_info *vi =
	container_of(work, struct virtnet_info, refill.work);
bool still_empty;
int i;

for (i = 0; i < vi->curr_queue_pairs; i++) {
struct receive_queue *rq = &vi->rq[i];
- napi_disable(&rq->napi);
+ virtnet_napi_disable(rq);
still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
- virtnet_napi_enable(rq->vq, &rq->napi);
+ virtnet_napi_enable(rq);
static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
{
- virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
- napi_disable(&vi->rq[qp_index].napi);
+ virtnet_napi_tx_disable(&vi->sq[qp_index]);
+ virtnet_napi_disable(&vi->rq[qp_index]);
xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
}
bool running = netif_running(vi->dev);

if (running) {
- napi_disable(&rq->napi);
+ virtnet_napi_disable(rq);
virtnet_cancel_dim(vi, &rq->dim);
}
}
qindex = sq - vi->sq;

if (running)
- virtnet_napi_tx_disable(&sq->napi);
+ virtnet_napi_tx_disable(sq);
txq = netdev_get_tx_queue(vi->dev, qindex);
/* Make sure NAPI is not using any XDP TX queues for RX. */
if (netif_running(dev)) {
for (i = 0; i < vi->max_queue_pairs; i++) {
- napi_disable(&vi->rq[i].napi);
- virtnet_napi_tx_disable(&vi->sq[i].napi);
+ virtnet_napi_disable(&vi->rq[i]);
+ virtnet_napi_tx_disable(&vi->sq[i]);
}
}