* This work is licensed under the terms of the GNU GPL, version 2.
*/
#include <linux/miscdevice.h>
+#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <net/af_vsock.h>
#include "vhost.h"
-#include "vsock.h"
#define VHOST_VSOCK_DEFAULT_HOST_CID 2
-static int vhost_transport_socket_init(struct vsock_sock *vsk,
- struct vsock_sock *psk);
-
enum {
VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};
/* Used to track all the vhost_vsock instances on the system. */
+static DEFINE_SPINLOCK(vhost_vsock_lock);
static LIST_HEAD(vhost_vsock_list);
-static DEFINE_MUTEX(vhost_vsock_mutex);
-
-struct vhost_vsock_virtqueue {
- struct vhost_virtqueue vq;
-};
struct vhost_vsock {
- /* Vhost device */
struct vhost_dev dev;
- /* Vhost vsock virtqueue*/
- struct vhost_vsock_virtqueue vqs[VSOCK_VQ_MAX];
- /* Link to global vhost_vsock_list*/
+ struct vhost_virtqueue vqs[2];
+
+ /* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
struct list_head list;
- /* Head for pkt from host to guest */
- struct list_head send_pkt_list;
- /* Work item to send pkt */
+
struct vhost_work send_pkt_work;
- /* Wait queue for send pkt */
- wait_queue_head_t queue_wait;
- /* Used for global tx buf limitation */
- u32 total_tx_buf;
- /* Guest contex id this vhost_vsock instance handles */
+ spinlock_t send_pkt_list_lock;
+ struct list_head send_pkt_list; /* host->guest pending packets */
+
+ atomic_t queued_replies;
+
u32 guest_cid;
};
{
struct vhost_vsock *vsock;
- mutex_lock(&vhost_vsock_mutex);
+ spin_lock_bh(&vhost_vsock_lock);
list_for_each_entry(vsock, &vhost_vsock_list, list) {
- if (vsock->guest_cid == guest_cid) {
- mutex_unlock(&vhost_vsock_mutex);
+ u32 other_cid = vsock->guest_cid;
+
+ /* Skip instances that have no CID yet */
+ if (other_cid == 0)
+ continue;
+
+ if (other_cid == guest_cid) {
+ spin_unlock_bh(&vhost_vsock_lock);
return vsock;
}
}
- mutex_unlock(&vhost_vsock_mutex);
+ spin_unlock_bh(&vhost_vsock_lock);
return NULL;
}
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
struct vhost_virtqueue *vq)
{
+ struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
bool added = false;
+ bool restart_tx = false;
mutex_lock(&vq->mutex);
+
+ if (!vq->private_data)
+ goto out;
+
+ /* Avoid further vmexits, we're already processing the virtqueue */
vhost_disable_notify(&vsock->dev, vq);
+
for (;;) {
struct virtio_vsock_pkt *pkt;
struct iov_iter iov_iter;
unsigned out, in;
- struct sock *sk;
size_t nbytes;
size_t len;
int head;
+ spin_lock_bh(&vsock->send_pkt_list_lock);
if (list_empty(&vsock->send_pkt_list)) {
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
vhost_enable_notify(&vsock->dev, vq);
break;
}
+ pkt = list_first_entry(&vsock->send_pkt_list,
+ struct virtio_vsock_pkt, list);
+ list_del_init(&pkt->list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
&out, &in, NULL, NULL);
- pr_debug("%s: head = %d\n", __func__, head);
- if (head < 0)
+ if (head < 0) {
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_add(&pkt->list, &vsock->send_pkt_list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
break;
+ }
if (head == vq->num) {
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_add(&pkt->list, &vsock->send_pkt_list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ /* We cannot finish yet if more buffers snuck in while
+ * re-enabling notify.
+ */
if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
vhost_disable_notify(&vsock->dev, vq);
continue;
}
break;
}
- pkt = list_first_entry(&vsock->send_pkt_list,
- struct virtio_vsock_pkt, list);
- list_del_init(&pkt->list);
-
if (out) {
virtio_transport_free_pkt(pkt);
vq_err(vq, "Expected 0 output buffers, got %u\n", out);
break;
}
- vhost_add_used(vq, head, pkt->len); /* TODO should this be sizeof(pkt->hdr) + pkt->len? */
+ vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
added = true;
- virtio_transport_dec_tx_pkt(pkt);
- vsock->total_tx_buf -= pkt->len;
+ if (pkt->reply) {
+ int val;
+
+ val = atomic_dec_return(&vsock->queued_replies);
- sk = sk_vsock(pkt->trans->vsk);
- /* Release refcnt taken in vhost_transport_send_pkt */
- sock_put(sk);
+ /* Do we have resources to resume tx processing? */
+ if (val + 1 == tx_vq->num)
+ restart_tx = true;
+ }
virtio_transport_free_pkt(pkt);
}
if (added)
vhost_signal(&vsock->dev, vq);
+
+out:
mutex_unlock(&vq->mutex);
- if (added)
- wake_up(&vsock->queue_wait);
+ if (restart_tx)
+ vhost_poll_queue(&tx_vq->poll);
}
static void vhost_transport_send_pkt_work(struct vhost_work *work)
struct vhost_vsock *vsock;
vsock = container_of(work, struct vhost_vsock, send_pkt_work);
- vq = &vsock->vqs[VSOCK_VQ_RX].vq;
+ vq = &vsock->vqs[VSOCK_VQ_RX];
vhost_transport_do_send_pkt(vsock, vq);
}
static int
-vhost_transport_send_pkt(struct vsock_sock *vsk,
- struct virtio_vsock_pkt_info *info)
+vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
- u32 src_cid, src_port, dst_cid, dst_port;
- struct virtio_transport *trans;
- struct virtio_vsock_pkt *pkt;
- struct vhost_virtqueue *vq;
struct vhost_vsock *vsock;
- u32 pkt_len = info->pkt_len;
- DEFINE_WAIT(wait);
-
- src_cid = vhost_transport_get_local_cid();
- src_port = vsk->local_addr.svm_port;
- if (!info->remote_cid) {
- dst_cid = vsk->remote_addr.svm_cid;
- dst_port = vsk->remote_addr.svm_port;
- } else {
- dst_cid = info->remote_cid;
- dst_port = info->remote_port;
- }
+ struct vhost_virtqueue *vq;
+ int len = pkt->len;
/* Find the vhost_vsock according to guest context id */
- vsock = vhost_vsock_get(dst_cid);
- if (!vsock)
+ vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
+ if (!vsock) {
+ virtio_transport_free_pkt(pkt);
return -ENODEV;
-
- trans = vsk->trans;
- vq = &vsock->vqs[VSOCK_VQ_RX].vq;
-
- /* we can send less than pkt_len bytes */
- if (pkt_len > VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE)
- pkt_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
-
- /* virtio_transport_get_credit might return less than pkt_len credit */
- pkt_len = virtio_transport_get_credit(trans, pkt_len);
-
- /* Do not send zero length OP_RW pkt*/
- if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
- return pkt_len;
-
- /* Respect global tx buf limitation */
- mutex_lock(&vq->mutex);
- while (pkt_len + vsock->total_tx_buf > VIRTIO_VSOCK_MAX_TX_BUF_SIZE) {
- prepare_to_wait_exclusive(&vsock->queue_wait, &wait,
- TASK_UNINTERRUPTIBLE);
- mutex_unlock(&vq->mutex);
- schedule();
- mutex_lock(&vq->mutex);
- finish_wait(&vsock->queue_wait, &wait);
}
- vsock->total_tx_buf += pkt_len;
- mutex_unlock(&vq->mutex);
- pkt = virtio_transport_alloc_pkt(vsk, info, pkt_len,
- src_cid, src_port,
- dst_cid, dst_port);
- if (!pkt) {
- mutex_lock(&vq->mutex);
- vsock->total_tx_buf -= pkt_len;
- mutex_unlock(&vq->mutex);
- virtio_transport_put_credit(trans, pkt_len);
- return -ENOMEM;
- }
+ vq = &vsock->vqs[VSOCK_VQ_RX];
- pr_debug("%s:info->pkt_len= %d\n", __func__, pkt_len);
- /* Released in vhost_transport_do_send_pkt */
- sock_hold(&trans->vsk->sk);
- virtio_transport_inc_tx_pkt(pkt);
+ if (pkt->reply)
+ atomic_inc(&vsock->queued_replies);
- /* Queue it up in vhost work */
- mutex_lock(&vq->mutex);
+ spin_lock_bh(&vsock->send_pkt_list_lock);
list_add_tail(&pkt->list, &vsock->send_pkt_list);
- vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
- mutex_unlock(&vq->mutex);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
- return pkt_len;
+ vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+ return len;
}
-static struct virtio_transport_pkt_ops vhost_ops = {
- .send_pkt = vhost_transport_send_pkt,
-};
-
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
unsigned int out, unsigned int in)
return NULL;
}
- if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_DGRAM)
- pkt->len = le32_to_cpu(pkt->hdr.len) & 0XFFFF;
- else if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
+ if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
pkt->len = le32_to_cpu(pkt->hdr.len);
/* No payload */
return pkt;
}
-static void vhost_vsock_handle_ctl_kick(struct vhost_work *work)
+/* Is there space left for replies to rx packets? */
+static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
- struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
- poll.work);
- struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
- dev);
+ struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
+ int val;
+
+ smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
+ val = atomic_read(&vsock->queued_replies);
- pr_debug("%s vq=%p, vsock=%p\n", __func__, vq, vsock);
+ return val < vq->num;
}
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
int head;
unsigned int out, in;
bool added = false;
- u32 len;
mutex_lock(&vq->mutex);
+
+ if (!vq->private_data)
+ goto out;
+
vhost_disable_notify(&vsock->dev, vq);
for (;;) {
+ u32 len;
+
+ if (!vhost_vsock_more_replies(vsock)) {
+ /* Stop tx until the device processes already
+ * pending replies. Leave tx virtqueue
+ * callbacks disabled.
+ */
+ goto no_more_replies;
+ }
+
head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
&out, &in, NULL, NULL);
if (head < 0)
len = pkt->len;
/* Only accept correctly addressed packets */
- if (le32_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
- le32_to_cpu(pkt->hdr.dst_cid) == vhost_transport_get_local_cid())
+ if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
virtio_transport_recv_pkt(pkt);
else
virtio_transport_free_pkt(pkt);
- vhost_add_used(vq, head, len);
+ vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
added = true;
}
+
+no_more_replies:
if (added)
vhost_signal(&vsock->dev, vq);
+
+out:
mutex_unlock(&vq->mutex);
}
vhost_transport_do_send_pkt(vsock, vq);
}
+/* Enable the device's virtqueues: mark each vq as running by setting
+ * vq->private_data to the vhost_vsock instance (the kick handlers and
+ * vhost_transport_do_send_pkt() treat a NULL private_data as "not
+ * running").  Invoked from the VHOST_VSOCK_SET_RUNNING ioctl with
+ * start != 0.
+ *
+ * Returns 0 on success, -EFAULT if a virtqueue's rings are not
+ * accessible, or the error from vhost_dev_check_owner() if the caller
+ * does not own the device.
+ */
+static int vhost_vsock_start(struct vhost_vsock *vsock)
+{
+ size_t i;
+ int ret;
+
+ mutex_lock(&vsock->dev.mutex);
+
+ /* Only the process that owns the vhost device may start it */
+ ret = vhost_dev_check_owner(&vsock->dev);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ struct vhost_virtqueue *vq = &vsock->vqs[i];
+
+ mutex_lock(&vq->mutex);
+
+ if (!vhost_vq_access_ok(vq)) {
+ ret = -EFAULT;
+ mutex_unlock(&vq->mutex);
+ goto err_vq;
+ }
+
+ /* Idempotent: a vq that is already running is left untouched */
+ if (!vq->private_data) {
+ vq->private_data = vsock;
+ vhost_vq_init_access(vq);
+ }
+
+ mutex_unlock(&vq->mutex);
+ }
+
+ mutex_unlock(&vsock->dev.mutex);
+ return 0;
+
+err_vq:
+ /* Roll back: mark every virtqueue as not running again */
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ struct vhost_virtqueue *vq = &vsock->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vq->private_data = NULL;
+ mutex_unlock(&vq->mutex);
+ }
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return ret;
+}
+
+/* Disable the device's virtqueues by clearing vq->private_data, so the
+ * kick handlers bail out early.  Invoked from the VHOST_VSOCK_SET_RUNNING
+ * ioctl with start == 0, and from device release.
+ *
+ * Returns 0 on success, or the error from vhost_dev_check_owner() if the
+ * caller does not own the device.
+ */
+static int vhost_vsock_stop(struct vhost_vsock *vsock)
+{
+ size_t i;
+ int ret;
+
+ mutex_lock(&vsock->dev.mutex);
+
+ /* Only the process that owns the vhost device may stop it */
+ ret = vhost_dev_check_owner(&vsock->dev);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ struct vhost_virtqueue *vq = &vsock->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vq->private_data = NULL;
+ mutex_unlock(&vq->mutex);
+ }
+
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return ret;
+}
+
+/* Free a vhost_vsock allocated by vhost_vsock_dev_open(); kvfree()
+ * handles both the kzalloc() allocation and the vmalloc() fallback.
+ */
+static void vhost_vsock_free(struct vhost_vsock *vsock)
+{
+ kvfree(vsock);
+}
+
static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
struct vhost_virtqueue **vqs;
struct vhost_vsock *vsock;
int ret;
- vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
- if (!vsock)
- return -ENOMEM;
-
- pr_debug("%s:vsock=%p\n", __func__, vsock);
+ /* This struct is large and allocation could fail, fall back to vmalloc
+ * if there is no other way.
+ */
+ vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ if (!vsock) {
+ vsock = vmalloc(sizeof(*vsock));
+ if (!vsock)
+ return -ENOMEM;
+ }
- vqs = kmalloc(VSOCK_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
+ vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
if (!vqs) {
ret = -ENOMEM;
goto out;
}
- vqs[VSOCK_VQ_CTRL] = &vsock->vqs[VSOCK_VQ_CTRL].vq;
- vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX].vq;
- vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX].vq;
- vsock->vqs[VSOCK_VQ_CTRL].vq.handle_kick = vhost_vsock_handle_ctl_kick;
- vsock->vqs[VSOCK_VQ_TX].vq.handle_kick = vhost_vsock_handle_tx_kick;
- vsock->vqs[VSOCK_VQ_RX].vq.handle_kick = vhost_vsock_handle_rx_kick;
+ atomic_set(&vsock->queued_replies, 0);
- vhost_dev_init(&vsock->dev, vqs, VSOCK_VQ_MAX);
+ vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
+ vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
+ vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
+ vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
+
+ vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));
file->private_data = vsock;
- init_waitqueue_head(&vsock->queue_wait);
+ spin_lock_init(&vsock->send_pkt_list_lock);
INIT_LIST_HEAD(&vsock->send_pkt_list);
vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
- mutex_lock(&vhost_vsock_mutex);
+ spin_lock_bh(&vhost_vsock_lock);
list_add_tail(&vsock->list, &vhost_vsock_list);
- mutex_unlock(&vhost_vsock_mutex);
+ spin_unlock_bh(&vhost_vsock_lock);
return 0;
out:
- kfree(vsock);
+ vhost_vsock_free(vsock);
return ret;
}
{
int i;
- for (i = 0; i < VSOCK_VQ_MAX; i++)
- vhost_poll_flush(&vsock->vqs[i].vq.poll);
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
+ if (vsock->vqs[i].handle_kick)
+ vhost_poll_flush(&vsock->vqs[i].poll);
vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}
+/* Reset a connected socket (SOCK_DONE, full shutdown, ECONNRESET) when no
+ * vhost_vsock instance serves its CID any more.  Called for each connected
+ * socket via vsock_for_each_connected_socket() when a device is released.
+ */
+static void vhost_vsock_reset_orphans(struct sock *sk)
+{
+ struct vsock_sock *vsk = vsock_sk(sk);
+
+ /* vmci_transport.c doesn't take sk_lock here either. At least we're
+ * under vsock_table_lock so the sock cannot disappear while we're
+ * executing.
+ */
+
+ if (!vhost_vsock_get(vsk->local_addr.svm_cid)) {
+ sock_set_flag(sk, SOCK_DONE);
+ vsk->peer_shutdown = SHUTDOWN_MASK;
+ sk->sk_state = SS_UNCONNECTED;
+ sk->sk_err = ECONNRESET;
+ sk->sk_error_report(sk);
+ }
+}
+
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
struct vhost_vsock *vsock = file->private_data;
- mutex_lock(&vhost_vsock_mutex);
+ spin_lock_bh(&vhost_vsock_lock);
list_del(&vsock->list);
- mutex_unlock(&vhost_vsock_mutex);
+ spin_unlock_bh(&vhost_vsock_lock);
- vhost_dev_stop(&vsock->dev);
+ /* Iterating over all connections for all CIDs to find orphans is
+ * inefficient. Room for improvement here. */
+ vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
+
+ vhost_vsock_stop(vsock);
vhost_vsock_flush(vsock);
+ vhost_dev_stop(&vsock->dev);
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ while (!list_empty(&vsock->send_pkt_list)) {
+ struct virtio_vsock_pkt *pkt;
+
+ pkt = list_first_entry(&vsock->send_pkt_list,
+ struct virtio_vsock_pkt, list);
+ list_del_init(&pkt->list);
+ virtio_transport_free_pkt(pkt);
+ }
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
vhost_dev_cleanup(&vsock->dev, false);
kfree(vsock->dev.vqs);
- kfree(vsock);
+ vhost_vsock_free(vsock);
return 0;
}
-static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u32 guest_cid)
+static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
struct vhost_vsock *other;
/* Refuse reserved CIDs */
- if (guest_cid <= VMADDR_CID_HOST) {
+ if (guest_cid <= VMADDR_CID_HOST ||
+ guest_cid == U32_MAX)
+ return -EINVAL;
+
+ /* 64-bit CIDs are not yet supported */
+ if (guest_cid > U32_MAX)
return -EINVAL;
- }
/* Refuse if CID is already in use */
other = vhost_vsock_get(guest_cid);
- if (other && other != vsock) {
+ if (other && other != vsock)
return -EADDRINUSE;
- }
- mutex_lock(&vhost_vsock_mutex);
+ spin_lock_bh(&vhost_vsock_lock);
vsock->guest_cid = guest_cid;
- pr_debug("%s:guest_cid=%d\n", __func__, guest_cid);
- mutex_unlock(&vhost_vsock_mutex);
+ spin_unlock_bh(&vhost_vsock_lock);
return 0;
}
return -EFAULT;
}
- for (i = 0; i < VSOCK_VQ_MAX; i++) {
- vq = &vsock->vqs[i].vq;
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
vq->acked_features = features;
mutex_unlock(&vq->mutex);
{
struct vhost_vsock *vsock = f->private_data;
void __user *argp = (void __user *)arg;
- u64 __user *featurep = argp;
- u32 __user *cidp = argp;
- u32 guest_cid;
+ u64 guest_cid;
u64 features;
+ int start;
int r;
switch (ioctl) {
case VHOST_VSOCK_SET_GUEST_CID:
- if (get_user(guest_cid, cidp))
+ if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
return -EFAULT;
return vhost_vsock_set_cid(vsock, guest_cid);
+ case VHOST_VSOCK_SET_RUNNING:
+ if (copy_from_user(&start, argp, sizeof(start)))
+ return -EFAULT;
+ if (start)
+ return vhost_vsock_start(vsock);
+ else
+ return vhost_vsock_stop(vsock);
case VHOST_GET_FEATURES:
features = VHOST_VSOCK_FEATURES;
- if (copy_to_user(featurep, &features, sizeof(features)))
+ if (copy_to_user(argp, &features, sizeof(features)))
return -EFAULT;
return 0;
case VHOST_SET_FEATURES:
- if (copy_from_user(&features, featurep, sizeof(features)))
+ if (copy_from_user(&features, argp, sizeof(features)))
return -EFAULT;
return vhost_vsock_set_features(vsock, features);
default:
.fops = &vhost_vsock_fops,
};
-static int
-vhost_transport_socket_init(struct vsock_sock *vsk, struct vsock_sock *psk)
-{
- struct virtio_transport *trans;
- int ret;
-
- ret = virtio_transport_do_socket_init(vsk, psk);
- if (ret)
- return ret;
-
- trans = vsk->trans;
- trans->ops = &vhost_ops;
+static struct virtio_transport vhost_transport = {
+ .transport = {
+ .get_local_cid = vhost_transport_get_local_cid,
+
+ .init = virtio_transport_do_socket_init,
+ .destruct = virtio_transport_destruct,
+ .release = virtio_transport_release,
+ .connect = virtio_transport_connect,
+ .shutdown = virtio_transport_shutdown,
+
+ .dgram_enqueue = virtio_transport_dgram_enqueue,
+ .dgram_dequeue = virtio_transport_dgram_dequeue,
+ .dgram_bind = virtio_transport_dgram_bind,
+ .dgram_allow = virtio_transport_dgram_allow,
+
+ .stream_enqueue = virtio_transport_stream_enqueue,
+ .stream_dequeue = virtio_transport_stream_dequeue,
+ .stream_has_data = virtio_transport_stream_has_data,
+ .stream_has_space = virtio_transport_stream_has_space,
+ .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
+ .stream_is_active = virtio_transport_stream_is_active,
+ .stream_allow = virtio_transport_stream_allow,
+
+ .notify_poll_in = virtio_transport_notify_poll_in,
+ .notify_poll_out = virtio_transport_notify_poll_out,
+ .notify_recv_init = virtio_transport_notify_recv_init,
+ .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
+ .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
+ .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
+ .notify_send_init = virtio_transport_notify_send_init,
+ .notify_send_pre_block = virtio_transport_notify_send_pre_block,
+ .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
+ .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
+
+ .set_buffer_size = virtio_transport_set_buffer_size,
+ .set_min_buffer_size = virtio_transport_set_min_buffer_size,
+ .set_max_buffer_size = virtio_transport_set_max_buffer_size,
+ .get_buffer_size = virtio_transport_get_buffer_size,
+ .get_min_buffer_size = virtio_transport_get_min_buffer_size,
+ .get_max_buffer_size = virtio_transport_get_max_buffer_size,
+ },
- return ret;
-}
-
-static struct vsock_transport vhost_transport = {
- .get_local_cid = vhost_transport_get_local_cid,
-
- .init = vhost_transport_socket_init,
- .destruct = virtio_transport_destruct,
- .release = virtio_transport_release,
- .connect = virtio_transport_connect,
- .shutdown = virtio_transport_shutdown,
-
- .dgram_enqueue = virtio_transport_dgram_enqueue,
- .dgram_dequeue = virtio_transport_dgram_dequeue,
- .dgram_bind = virtio_transport_dgram_bind,
- .dgram_allow = virtio_transport_dgram_allow,
-
- .stream_enqueue = virtio_transport_stream_enqueue,
- .stream_dequeue = virtio_transport_stream_dequeue,
- .stream_has_data = virtio_transport_stream_has_data,
- .stream_has_space = virtio_transport_stream_has_space,
- .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
- .stream_is_active = virtio_transport_stream_is_active,
- .stream_allow = virtio_transport_stream_allow,
-
- .notify_poll_in = virtio_transport_notify_poll_in,
- .notify_poll_out = virtio_transport_notify_poll_out,
- .notify_recv_init = virtio_transport_notify_recv_init,
- .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
- .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
- .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
- .notify_send_init = virtio_transport_notify_send_init,
- .notify_send_pre_block = virtio_transport_notify_send_pre_block,
- .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
- .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
-
- .set_buffer_size = virtio_transport_set_buffer_size,
- .set_min_buffer_size = virtio_transport_set_min_buffer_size,
- .set_max_buffer_size = virtio_transport_set_max_buffer_size,
- .get_buffer_size = virtio_transport_get_buffer_size,
- .get_min_buffer_size = virtio_transport_get_min_buffer_size,
- .get_max_buffer_size = virtio_transport_get_max_buffer_size,
+ .send_pkt = vhost_transport_send_pkt,
};
static int __init vhost_vsock_init(void)
{
int ret;
- ret = vsock_core_init(&vhost_transport);
+ ret = vsock_core_init(&vhost_transport.transport);
if (ret < 0)
return ret;
return misc_register(&vhost_vsock_misc);