This patch renames vhost_work_dev_flush to just vhost_dev_flush to
reflect that it flushes everything on the device and that drivers
don't know/care that polls are based on vhost_works. Drivers just
flush the entire device; polls, works for vhost-scsi management TMFs,
IO net virtqueues, etc. are all flushed.
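As a minimal sketch (not part of this patch; the helper name below is
hypothetical), a driver-side flush after the rename is a single call,
mirroring what vhost_vsock_flush and vhost_test_flush do in the hunks
below:

	static void my_driver_flush(struct vhost_dev *dev)
	{
		/* One call waits for all queued vhost_works, including
		 * the works backing vhost_poll handlers.
		 */
		vhost_dev_flush(dev);
	}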
Signed-off-by: Mike Christie <michael.christie@oracle.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Message-Id: <20220517180850.198915-9-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
static void vhost_net_flush(struct vhost_net *n)
{
- vhost_work_dev_flush(&n->dev);
+ vhost_dev_flush(&n->dev);
if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
n->tx_flush = true;
}
if (oldsock) {
- vhost_work_dev_flush(&n->dev);
+ vhost_dev_flush(&n->dev);
sockfd_put(oldsock);
}
kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
/* Flush both the vhost poll and vhost work */
- vhost_work_dev_flush(&vs->dev);
+ vhost_dev_flush(&vs->dev);
/* Wait for all reqs issued before the flush to be finished */
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
static void vhost_test_flush(struct vhost_test *n)
{
- vhost_work_dev_flush(&n->dev);
+ vhost_dev_flush(&n->dev);
}
static int vhost_test_release(struct inode *inode, struct file *f)
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
-void vhost_work_dev_flush(struct vhost_dev *dev)
+void vhost_dev_flush(struct vhost_dev *dev)
{
struct vhost_flush_struct flush;
wait_for_completion(&flush.wait_event);
}
}
-EXPORT_SYMBOL_GPL(vhost_work_dev_flush);
+EXPORT_SYMBOL_GPL(vhost_dev_flush);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
attach.owner = current;
vhost_work_init(&attach.work, vhost_attach_cgroups_work);
vhost_work_queue(dev, &attach.work);
- vhost_work_dev_flush(dev);
+ vhost_dev_flush(dev);
return attach.ret;
}
vhost_poll_stop(&dev->vqs[i]->poll);
}
- vhost_work_dev_flush(dev);
+ vhost_dev_flush(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);
mutex_unlock(&vq->mutex);
if (pollstop && vq->handle_kick)
- vhost_work_dev_flush(vq->poll.dev);
+ vhost_dev_flush(vq->poll.dev);
return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
-void vhost_work_dev_flush(struct vhost_dev *dev);
+void vhost_dev_flush(struct vhost_dev *dev);
struct vhost_log {
u64 addr;
static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
- vhost_work_dev_flush(&vsock->dev);
+ vhost_dev_flush(&vsock->dev);
}
static void vhost_vsock_reset_orphans(struct sock *sk)