static void virtnet_napi_enable(struct receive_queue *rq)
{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ int qidx = vq2rxq(rq->vq);
+
virtnet_napi_do_enable(rq->vq, &rq->napi);
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, &rq->napi);
}
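netif_queue_set_napi() is what publishes the queue-to-NAPI mapping to the core so it can be reported to userspace. For reference, the core side at the time of this change looks roughly like the following (paraphrased from net/core/dev.c and abridged; treat it as a sketch, not the exact upstream body):

void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
                          enum netdev_queue_type type,
                          struct napi_struct *napi)
{
        if (WARN_ON_ONCE(napi && !napi->dev))
                return;
        /* Once the netdev is registered, callers must hold RTNL. */
        if (dev->reg_state >= NETREG_REGISTERED)
                ASSERT_RTNL();

        switch (type) {
        case NETDEV_QUEUE_TYPE_RX:
                __netif_get_rx_queue(dev, queue_index)->napi = napi;
                return;
        case NETDEV_QUEUE_TYPE_TX:
                netdev_get_tx_queue(dev, queue_index)->napi = napi;
                return;
        default:
                return;
        }
}

The ASSERT_RTNL() here is what drives the locking changes in the freeze/restore hunks further down.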
static void virtnet_napi_tx_enable(struct send_queue *sq)
{
struct virtnet_info *vi = sq->vq->vdev->priv;
struct napi_struct *napi = &sq->napi;
+ int qidx = vq2txq(sq->vq);
if (!napi->weight)
return;

virtnet_napi_do_enable(sq->vq, napi);
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, napi);
}
static void virtnet_napi_tx_disable(struct send_queue *sq)
{
+ struct virtnet_info *vi = sq->vq->vdev->priv;
struct napi_struct *napi = &sq->napi;
+ int qidx = vq2txq(sq->vq);
- if (napi->weight)
+ if (napi->weight) {
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, NULL);
napi_disable(napi);
+ }
}
static void virtnet_napi_disable(struct receive_queue *rq)
{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
struct napi_struct *napi = &rq->napi;
+ int qidx = vq2rxq(rq->vq);
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, NULL);
napi_disable(napi);
}
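Taken together, the four helpers keep an invariant: a queue points at its NAPI only while that NAPI is enabled (the mapping is set after enabling and cleared before disabling). In mainline they are driven per queue pair from the open path; below is a minimal sketch of such a caller. The function name is hypothetical, and the real virtnet_enable_queue_pair() also handles XDP rxq registration and error unwinding:

/* Sketch: how the helpers above are expected to be called per queue
 * pair on open. Error handling and XDP setup are omitted.
 */
static void virtnet_enable_queue_pair_sketch(struct virtnet_info *vi,
                                             int qp_index)
{
        virtnet_napi_enable(&vi->rq[qp_index]);
        virtnet_napi_tx_enable(&vi->sq[qp_index]);
}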
for (i = 0; i < vi->curr_queue_pairs; i++) {
struct receive_queue *rq = &vi->rq[i];
- virtnet_napi_disable(rq);
+ /*
+ * When queue API support is added in the future and the call
+ * below becomes napi_disable_locked, this driver will need to
+ * be refactored.
+ *
+ * One possible solution would be to:
+ * - cancel refill_work with cancel_delayed_work (note:
+ * non-sync)
+ * - cancel refill_work with cancel_delayed_work_sync in
+ * virtnet_remove after the netdev is unregistered
+ * - wrap all of the work in a lock (perhaps the netdev
+ * instance lock)
+ * - check netif_running() and return early to avoid a race
+ */
+ napi_disable(&rq->napi);
still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
- virtnet_napi_enable(rq);
+ virtnet_napi_do_enable(rq->vq, &rq->napi);
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again.
*/
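The comment added above outlines one way to cope once the queue API forces napi_disable_locked() here. A rough sketch of that shape, assuming the netdev instance lock primitives (netdev_lock()/netdev_unlock(), napi_disable_locked()/napi_enable_locked()); the function name is hypothetical and this is illustration, not upstream code:

/* Sketch of the refactor outlined in the comment: run the refill
 * under the netdev instance lock and bail out early if the device
 * stopped running between scheduling and execution of the work.
 * virtnet_close() would cancel with cancel_delayed_work() (non-sync,
 * safe because of the netif_running() check) and virtnet_remove()
 * would use cancel_delayed_work_sync() after unregistering the netdev.
 */
static void refill_work_sketch(struct work_struct *work)
{
        struct virtnet_info *vi =
                container_of(work, struct virtnet_info, refill.work);
        bool still_empty;
        int i;

        netdev_lock(vi->dev);
        if (!netif_running(vi->dev)) {
                netdev_unlock(vi->dev);
                return;
        }

        for (i = 0; i < vi->curr_queue_pairs; i++) {
                struct receive_queue *rq = &vi->rq[i];

                napi_disable_locked(&rq->napi);
                still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
                napi_enable_locked(&rq->napi);
                /* the virtqueue kick done by virtnet_napi_do_enable()
                 * is elided here for brevity */

                if (still_empty)
                        schedule_delayed_work(&vi->refill, HZ / 2);
        }
        netdev_unlock(vi->dev);
}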
netif_tx_lock_bh(vi->dev);
netif_device_detach(vi->dev);
netif_tx_unlock_bh(vi->dev);
- if (netif_running(vi->dev))
+ if (netif_running(vi->dev)) {
+ rtnl_lock();
virtnet_close(vi->dev);
+ rtnl_unlock();
+ }
}
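The rtnl_lock()/rtnl_unlock() pair is new because virtnet_close() now reaches netif_queue_set_napi() via virtnet_napi_disable() and virtnet_napi_tx_disable(), and, as sketched earlier, the core asserts RTNL for a registered device; this freeze path is entered from PM callbacks where RTNL is not already held.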
static int init_vqs(struct virtnet_info *vi);
enable_rx_mode_work(vi);
if (netif_running(vi->dev)) {
+ rtnl_lock();
err = virtnet_open(vi->dev);
+ rtnl_unlock();
if (err)
return err;
}
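The restore path mirrors this: virtnet_open() re-registers each NAPI with its queue, so it too runs under RTNL now. Once the device is back up, the mapping should be visible from userspace via the netdev netlink family's queue-get dump (for example through tools/net/ynl/cli.py with Documentation/netlink/specs/netdev.yaml), which reports a napi-id per queue.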