// SPDX-License-Identifier: GPL-2.0-only
/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID    2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others.
 */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small packets.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256

enum {
        VHOST_VSOCK_FEATURES = VHOST_FEATURES |
                               (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
                               (1ULL << VIRTIO_VSOCK_F_SEQPACKET)
};

enum {
        VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_MUTEX(vhost_vsock_mutex);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

struct vhost_vsock {
        struct vhost_dev dev;
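        /* Indexed by VSOCK_VQ_TX/VSOCK_VQ_RX.  The names follow the guest's
         * point of view: the host reads guest->host packets from
         * vqs[VSOCK_VQ_TX] and writes host->guest packets into
         * vqs[VSOCK_VQ_RX].
         */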
        struct vhost_virtqueue vqs[2];

        /* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
        struct hlist_node hash;

        struct vhost_work send_pkt_work;
        struct sk_buff_head send_pkt_queue; /* host->guest pending packets */

        atomic_t queued_replies;

        u32 guest_cid;
        bool seqpacket_allow;
};

static u32 vhost_transport_get_local_cid(void)
{
        return VHOST_VSOCK_DEFAULT_HOST_CID;
}

/* Callers that dereference the return value must hold vhost_vsock_mutex or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
        struct vhost_vsock *vsock;

        hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
                u32 other_cid = vsock->guest_cid;

                /* Skip instances that have no CID yet */
                if (other_cid == 0)
                        continue;

                if (other_cid == guest_cid)
                        return vsock;
        }

        return NULL;
}

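/* Drain vsock->send_pkt_queue into the guest's RX virtqueue.  Runs from
 * vhost worker context with vq->mutex held for the duration.  A packet that
 * does not fit into a single guest buffer is split across several buffers,
 * and the loop is bounded by the vhost weight limits so one virtqueue cannot
 * starve the others.
 */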
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                            struct vhost_virtqueue *vq)
{
        struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
        int pkts = 0, total_len = 0;
        bool added = false;
        bool restart_tx = false;

        mutex_lock(&vq->mutex);

        if (!vhost_vq_get_backend(vq))
                goto out;

        if (!vq_meta_prefetch(vq))
                goto out;

        /* Avoid further vmexits, we're already processing the virtqueue */
        vhost_disable_notify(&vsock->dev, vq);

        do {
                struct virtio_vsock_hdr *hdr;
                size_t iov_len, payload_len;
                struct iov_iter iov_iter;
                u32 flags_to_restore = 0;
                struct sk_buff *skb;
                unsigned out, in;
                size_t nbytes;
                int head;

                skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);

                if (!skb) {
                        vhost_enable_notify(&vsock->dev, vq);
                        break;
                }

                head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                         &out, &in, NULL, NULL);
                if (head < 0) {
                        virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
                        break;
                }

                if (head == vq->num) {
                        virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
                        /* We cannot finish yet if more buffers snuck in while
                         * re-enabling notify.
                         */
                        if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
                                vhost_disable_notify(&vsock->dev, vq);
                                continue;
                        }
                        break;
                }

                if (out) {
                        kfree_skb(skb);
                        vq_err(vq, "Expected 0 output buffers, got %u\n", out);
                        break;
                }

                iov_len = iov_length(&vq->iov[out], in);
                if (iov_len < sizeof(*hdr)) {
                        kfree_skb(skb);
                        vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
                        break;
                }

                iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len);
                payload_len = skb->len;
                hdr = virtio_vsock_hdr(skb);

                /* If the packet is greater than the space available in the
                 * buffer, we split it using multiple buffers.
                 */
                if (payload_len > iov_len - sizeof(*hdr)) {
                        payload_len = iov_len - sizeof(*hdr);

                        /* When a large packet is split across several small
                         * rx buffers, each fragment carries a copy of the
                         * current packet's header (with its own length).
                         * For SOCK_SEQPACKET we must also clear the message
                         * delimiter bit (VIRTIO_VSOCK_SEQ_EOM) and the
                         * MSG_EOR bit (VIRTIO_VSOCK_SEQ_EOR) if they are
                         * set; otherwise the receiver would see a sequence
                         * of packets each marked as end-of-message/record.
                         * The saved bits are restored on the header before
                         * the last fragment is queued.
                         */
                        if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
                                hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
                                flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;

                                if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR) {
                                        hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
                                        flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
                                }
                        }
                }

                /* Set the correct length in the header */
                hdr->len = cpu_to_le32(payload_len);

                nbytes = copy_to_iter(hdr, sizeof(*hdr), &iov_iter);
                if (nbytes != sizeof(*hdr)) {
                        kfree_skb(skb);
                        vq_err(vq, "Faulted on copying pkt hdr\n");
                        break;
                }

                nbytes = copy_to_iter(skb->data, payload_len, &iov_iter);
                if (nbytes != payload_len) {
                        kfree_skb(skb);
                        vq_err(vq, "Faulted on copying pkt buf\n");
                        break;
                }

                /* Deliver to monitoring devices all packets that we
                 * will transmit.
                 */
                virtio_transport_deliver_tap_pkt(skb);

                vhost_add_used(vq, head, sizeof(*hdr) + payload_len);
                added = true;

                skb_pull(skb, payload_len);
                total_len += payload_len;

                /* If we didn't send all the payload we can requeue the packet
                 * to send it with the next available buffer.
                 */
                if (skb->len > 0) {
                        hdr->flags |= cpu_to_le32(flags_to_restore);

                        /* We are queueing the same skb to handle
                         * the remaining bytes, and we want to deliver it
                         * to monitoring devices in the next iteration.
                         */
                        virtio_vsock_skb_clear_tap_delivered(skb);
                        virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
                } else {
                        if (virtio_vsock_skb_reply(skb)) {
                                int val;

                                val = atomic_dec_return(&vsock->queued_replies);

                                /* Do we have resources to resume tx
                                 * processing?
                                 */
                                if (val + 1 == tx_vq->num)
                                        restart_tx = true;
                        }

                        consume_skb(skb);
                }
        } while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
        if (added)
                vhost_signal(&vsock->dev, vq);

out:
        mutex_unlock(&vq->mutex);

        if (restart_tx)
                vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
        struct vhost_virtqueue *vq;
        struct vhost_vsock *vsock;

        vsock = container_of(work, struct vhost_vsock, send_pkt_work);
        vq = &vsock->vqs[VSOCK_VQ_RX];

        vhost_transport_do_send_pkt(vsock, vq);
}

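/* Entry point used by the core vsock code to send a packet to the guest.
 * Runs in the caller's context: the skb is only queued on send_pkt_queue
 * here, and the actual copy into the virtqueue is deferred to the vhost
 * worker via send_pkt_work.
 */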
static int
vhost_transport_send_pkt(struct sk_buff *skb)
{
        struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
        struct vhost_vsock *vsock;
        int len = skb->len;

        rcu_read_lock();

        /* Find the vhost_vsock according to guest context id */
        vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid));
        if (!vsock) {
                rcu_read_unlock();
                kfree_skb(skb);
                return -ENODEV;
        }

        if (virtio_vsock_skb_reply(skb))
                atomic_inc(&vsock->queued_replies);

        virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
        vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

        rcu_read_unlock();
        return len;
}

static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
        struct vhost_vsock *vsock;
        int cnt = 0;
        int ret = -ENODEV;

        rcu_read_lock();

        /* Find the vhost_vsock according to guest context id */
        vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
        if (!vsock)
                goto out;

        cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);

        if (cnt) {
                struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
                int new_cnt;

                new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
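                /* Kick the tx queue if it was throttled: tx processing
                 * stops once queued_replies reaches tx_vq->num (see
                 * vhost_vsock_more_replies()), so restart it when purging
                 * brings the count back below that threshold.
                 */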
                if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
                        vhost_poll_queue(&tx_vq->poll);
        }

        ret = 0;
out:
        rcu_read_unlock();
        return ret;
}

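/* Build an skb from the guest's descriptor chain: copy and validate the
 * virtio_vsock header first, then copy hdr->len bytes of payload.  Returns
 * NULL on malformed input (unexpected in buffers, a short header, or a
 * payload length exceeding VIRTIO_VSOCK_MAX_PKT_BUF_SIZE or the chain
 * itself).
 */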
static struct sk_buff *
vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
                      unsigned int out, unsigned int in)
{
        struct virtio_vsock_hdr *hdr;
        struct iov_iter iov_iter;
        struct sk_buff *skb;
        size_t payload_len;
        size_t nbytes;
        size_t len;

        if (in != 0) {
                vq_err(vq, "Expected 0 input buffers, got %u\n", in);
                return NULL;
        }

        len = iov_length(vq->iov, out);

        /* len contains both payload and hdr */
        skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return NULL;

        iov_iter_init(&iov_iter, ITER_SOURCE, vq->iov, out, len);

        hdr = virtio_vsock_hdr(skb);
        nbytes = copy_from_iter(hdr, sizeof(*hdr), &iov_iter);
        if (nbytes != sizeof(*hdr)) {
                vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
                       sizeof(*hdr), nbytes);
                kfree_skb(skb);
                return NULL;
        }

        payload_len = le32_to_cpu(hdr->len);

        /* No payload */
        if (!payload_len)
                return skb;

        /* The pkt is too big or the length in the header is invalid */
        if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE ||
            payload_len + sizeof(*hdr) > len) {
                kfree_skb(skb);
                return NULL;
        }

        virtio_vsock_skb_rx_put(skb);

        nbytes = copy_from_iter(skb->data, payload_len, &iov_iter);
        if (nbytes != payload_len) {
                vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
                       payload_len, nbytes);
                kfree_skb(skb);
                return NULL;
        }

        return skb;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
        struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
        int val;

        smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
        val = atomic_read(&vsock->queued_replies);

        return val < vq->num;
}

static bool vhost_transport_seqpacket_allow(u32 remote_cid);

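/* The vsock transport ops.  Most callbacks are the generic virtio transport
 * implementations; only CID lookup, packet transmission/cancellation and
 * SEQPACKET negotiation are vhost-specific.
 */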
static struct virtio_transport vhost_transport = {
        .transport = {
                .module                   = THIS_MODULE,

                .get_local_cid            = vhost_transport_get_local_cid,

                .init                     = virtio_transport_do_socket_init,
                .destruct                 = virtio_transport_destruct,
                .release                  = virtio_transport_release,
                .connect                  = virtio_transport_connect,
                .shutdown                 = virtio_transport_shutdown,
                .cancel_pkt               = vhost_transport_cancel_pkt,

                .dgram_enqueue            = virtio_transport_dgram_enqueue,
                .dgram_dequeue            = virtio_transport_dgram_dequeue,
                .dgram_bind               = virtio_transport_dgram_bind,
                .dgram_allow              = virtio_transport_dgram_allow,

                .stream_enqueue           = virtio_transport_stream_enqueue,
                .stream_dequeue           = virtio_transport_stream_dequeue,
                .stream_has_data          = virtio_transport_stream_has_data,
                .stream_has_space         = virtio_transport_stream_has_space,
                .stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
                .stream_is_active         = virtio_transport_stream_is_active,
                .stream_allow             = virtio_transport_stream_allow,

                .seqpacket_dequeue        = virtio_transport_seqpacket_dequeue,
                .seqpacket_enqueue        = virtio_transport_seqpacket_enqueue,
                .seqpacket_allow          = vhost_transport_seqpacket_allow,
                .seqpacket_has_data       = virtio_transport_seqpacket_has_data,

                .notify_poll_in           = virtio_transport_notify_poll_in,
                .notify_poll_out          = virtio_transport_notify_poll_out,
                .notify_recv_init         = virtio_transport_notify_recv_init,
                .notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
                .notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
                .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
                .notify_send_init         = virtio_transport_notify_send_init,
                .notify_send_pre_block    = virtio_transport_notify_send_pre_block,
                .notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
                .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
                .notify_buffer_size       = virtio_transport_notify_buffer_size,
        },

        .send_pkt = vhost_transport_send_pkt,
};

static bool vhost_transport_seqpacket_allow(u32 remote_cid)
{
        struct vhost_vsock *vsock;
        bool seqpacket_allow = false;

        rcu_read_lock();
        vsock = vhost_vsock_get(remote_cid);

        if (vsock)
                seqpacket_allow = vsock->seqpacket_allow;

        rcu_read_unlock();

        return seqpacket_allow;
}

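/* The guest kicked the TX virtqueue: receive guest->host packets.  Packets
 * are dropped unless they are addressed from the guest's CID to the host
 * CID, and processing pauses while no room is left for replies.
 */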
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
                                                 dev);
        int head, pkts = 0, total_len = 0;
        unsigned int out, in;
        struct sk_buff *skb;
        bool added = false;

        mutex_lock(&vq->mutex);

        if (!vhost_vq_get_backend(vq))
                goto out;

        if (!vq_meta_prefetch(vq))
                goto out;

        vhost_disable_notify(&vsock->dev, vq);
        do {
                struct virtio_vsock_hdr *hdr;

                if (!vhost_vsock_more_replies(vsock)) {
                        /* Stop tx until the device processes already
                         * pending replies.  Leave tx virtqueue
                         * callbacks disabled.
                         */
                        goto no_more_replies;
                }

                head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                         &out, &in, NULL, NULL);
                if (head < 0)
                        break;

                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
                                vhost_disable_notify(&vsock->dev, vq);
                                continue;
                        }
                        break;
                }

                skb = vhost_vsock_alloc_skb(vq, out, in);
                if (!skb) {
                        vq_err(vq, "Faulted on pkt\n");
                        continue;
                }

                total_len += sizeof(*hdr) + skb->len;

                /* Deliver to monitoring devices all received packets */
                virtio_transport_deliver_tap_pkt(skb);

                hdr = virtio_vsock_hdr(skb);

                /* Only accept correctly addressed packets */
                if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid &&
                    le64_to_cpu(hdr->dst_cid) ==
                    vhost_transport_get_local_cid())
                        virtio_transport_recv_pkt(&vhost_transport, skb);
                else
                        kfree_skb(skb);

                vhost_add_used(vq, head, 0);
                added = true;
        } while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
        if (added)
                vhost_signal(&vsock->dev, vq);

out:
        mutex_unlock(&vq->mutex);
}

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
                                                 dev);

        vhost_transport_do_send_pkt(vsock, vq);
}

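/* VHOST_VSOCK_SET_RUNNING(1): verify ownership and ring access, then attach
 * the backend pointer to both virtqueues.  On failure, every virtqueue is
 * rolled back to the stopped state.
 */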
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
        struct vhost_virtqueue *vq;
        size_t i;
        int ret;

        mutex_lock(&vsock->dev.mutex);

        ret = vhost_dev_check_owner(&vsock->dev);
        if (ret)
                goto err;

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);

                if (!vhost_vq_access_ok(vq)) {
                        ret = -EFAULT;
                        goto err_vq;
                }

                if (!vhost_vq_get_backend(vq)) {
                        vhost_vq_set_backend(vq, vsock);
                        ret = vhost_vq_init_access(vq);
                        if (ret)
                                goto err_vq;
                }

                mutex_unlock(&vq->mutex);
        }

        /* Some packets may have been queued before the device was started,
         * let's kick the send worker to send them.
         */
        vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

        mutex_unlock(&vsock->dev.mutex);
        return 0;

err_vq:
        vhost_vq_set_backend(vq, NULL);
        mutex_unlock(&vq->mutex);

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);
                vhost_vq_set_backend(vq, NULL);
                mutex_unlock(&vq->mutex);
        }
err:
        mutex_unlock(&vsock->dev.mutex);
        return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
{
        size_t i;
        int ret = 0;

        mutex_lock(&vsock->dev.mutex);

        if (check_owner) {
                ret = vhost_dev_check_owner(&vsock->dev);
                if (ret)
                        goto err;
        }

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                struct vhost_virtqueue *vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);
                vhost_vq_set_backend(vq, NULL);
                mutex_unlock(&vq->mutex);
        }

err:
        mutex_unlock(&vsock->dev.mutex);
        return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
        kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
        struct vhost_virtqueue **vqs;
        struct vhost_vsock *vsock;
        int ret;

        /* This struct is large and allocation could fail, fall back to vmalloc
         * if there is no other way.
         */
        vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!vsock)
                return -ENOMEM;

        vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
                ret = -ENOMEM;
                goto out;
        }

        vsock->guest_cid = 0; /* no CID assigned yet */

        atomic_set(&vsock->queued_replies, 0);

        vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
        vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
        vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
        vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

        vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
                       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
                       VHOST_VSOCK_WEIGHT, true, NULL);

        file->private_data = vsock;
        skb_queue_head_init(&vsock->send_pkt_queue);
        vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
        return 0;

out:
        vhost_vsock_free(vsock);
        return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
        vhost_dev_flush(&vsock->dev);
}

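/* Called for each connected socket when a vhost_vsock instance goes away:
 * sockets whose peer CID no longer resolves to a live instance are marked
 * done and get ECONNRESET, unless a close timeout is already pending.
 */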
static void vhost_vsock_reset_orphans(struct sock *sk)
{
        struct vsock_sock *vsk = vsock_sk(sk);

        /* vmci_transport.c doesn't take sk_lock here either.  At least we're
         * under vsock_table_lock so the sock cannot disappear while we're
         * executing.
         */

        /* If the peer is still valid, no need to reset connection */
        if (vhost_vsock_get(vsk->remote_addr.svm_cid))
                return;

        /* If the close timeout is pending, let it expire.  This avoids races
         * with the timeout callback.
         */
        if (vsk->close_work_scheduled)
                return;

        sock_set_flag(sk, SOCK_DONE);
        vsk->peer_shutdown = SHUTDOWN_MASK;
        sk->sk_state = SS_UNCONNECTED;
        sk->sk_err = ECONNRESET;
        sk_error_report(sk);
}

static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
        struct vhost_vsock *vsock = file->private_data;

        mutex_lock(&vhost_vsock_mutex);
        if (vsock->guest_cid)
                hash_del_rcu(&vsock->hash);
        mutex_unlock(&vhost_vsock_mutex);

        /* Wait for other CPUs to finish using vsock */
        synchronize_rcu();

        /* Iterating over all connections for all CIDs to find orphans is
         * inefficient.  Room for improvement here.
         */
        vsock_for_each_connected_socket(&vhost_transport.transport,
                                        vhost_vsock_reset_orphans);

        /* Don't check the owner, because we are in the release path, so we
         * need to stop the vsock device in any case.
         * vhost_vsock_stop() cannot fail in this case, so we don't need to
         * check the return code.
         */
        vhost_vsock_stop(vsock, false);
        vhost_vsock_flush(vsock);
        vhost_dev_stop(&vsock->dev);

        virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);

        vhost_dev_cleanup(&vsock->dev);
        kfree(vsock->dev.vqs);
        vhost_vsock_free(vsock);
        return 0;
}

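/* VHOST_VSOCK_SET_GUEST_CID: assign the guest's context ID.  CIDs up to
 * VMADDR_CID_HOST (2) are reserved, as is U32_MAX (VMADDR_CID_ANY), and
 * 64-bit CIDs are rejected because the rest of the stack only handles
 * 32-bit CIDs.  See the usage sketch above vhost_vsock_misc below.
 */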
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
        struct vhost_vsock *other;

        /* Refuse reserved CIDs */
        if (guest_cid <= VMADDR_CID_HOST ||
            guest_cid == U32_MAX)
                return -EINVAL;

        /* 64-bit CIDs are not yet supported */
        if (guest_cid > U32_MAX)
                return -EINVAL;

        /* Refuse if CID is assigned to the guest->host transport (i.e. nested
         * VM), to make the loopback work.
         */
        if (vsock_find_cid(guest_cid))
                return -EADDRINUSE;

        /* Refuse if CID is already in use */
        mutex_lock(&vhost_vsock_mutex);
        other = vhost_vsock_get(guest_cid);
        if (other && other != vsock) {
                mutex_unlock(&vhost_vsock_mutex);
                return -EADDRINUSE;
        }

        if (vsock->guest_cid)
                hash_del_rcu(&vsock->hash);

        vsock->guest_cid = guest_cid;
        hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
        mutex_unlock(&vhost_vsock_mutex);

        return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
        struct vhost_virtqueue *vq;
        int i;

        if (features & ~VHOST_VSOCK_FEATURES)
                return -EOPNOTSUPP;

        mutex_lock(&vsock->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&vsock->dev)) {
                goto err;
        }

        if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
                if (vhost_init_device_iotlb(&vsock->dev, true))
                        goto err;
        }

        if (features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET))
                vsock->seqpacket_allow = true;

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];
                mutex_lock(&vq->mutex);
                vq->acked_features = features;
                mutex_unlock(&vq->mutex);
        }
        mutex_unlock(&vsock->dev.mutex);
        return 0;

err:
        mutex_unlock(&vsock->dev.mutex);
        return -EFAULT;
}

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
                                  unsigned long arg)
{
        struct vhost_vsock *vsock = f->private_data;
        void __user *argp = (void __user *)arg;
        u64 guest_cid;
        u64 features;
        int start;
        int r;

        switch (ioctl) {
        case VHOST_VSOCK_SET_GUEST_CID:
                if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
                        return -EFAULT;
                return vhost_vsock_set_cid(vsock, guest_cid);
        case VHOST_VSOCK_SET_RUNNING:
                if (copy_from_user(&start, argp, sizeof(start)))
                        return -EFAULT;
                if (start)
                        return vhost_vsock_start(vsock);
                else
                        return vhost_vsock_stop(vsock, true);
        case VHOST_GET_FEATURES:
                features = VHOST_VSOCK_FEATURES;
                if (copy_to_user(argp, &features, sizeof(features)))
                        return -EFAULT;
                return 0;
        case VHOST_SET_FEATURES:
                if (copy_from_user(&features, argp, sizeof(features)))
                        return -EFAULT;
                return vhost_vsock_set_features(vsock, features);
        case VHOST_GET_BACKEND_FEATURES:
                features = VHOST_VSOCK_BACKEND_FEATURES;
                if (copy_to_user(argp, &features, sizeof(features)))
                        return -EFAULT;
                return 0;
        case VHOST_SET_BACKEND_FEATURES:
                if (copy_from_user(&features, argp, sizeof(features)))
                        return -EFAULT;
                if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
                        return -EOPNOTSUPP;
                vhost_set_backend_features(&vsock->dev, features);
                return 0;
        default:
                mutex_lock(&vsock->dev.mutex);
                r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
                if (r == -ENOIOCTLCMD)
                        r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
                else
                        vhost_vsock_flush(vsock);
                mutex_unlock(&vsock->dev.mutex);
                return r;
        }
}

static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct vhost_vsock *vsock = file->private_data;
        struct vhost_dev *dev = &vsock->dev;
        int noblock = file->f_flags & O_NONBLOCK;

        return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
                                          struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct vhost_vsock *vsock = file->private_data;
        struct vhost_dev *dev = &vsock->dev;

        return vhost_chr_write_iter(dev, from);
}

static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
{
        struct vhost_vsock *vsock = file->private_data;
        struct vhost_dev *dev = &vsock->dev;

        return vhost_chr_poll(file, dev, wait);
}

static const struct file_operations vhost_vsock_fops = {
        .owner          = THIS_MODULE,
        .open           = vhost_vsock_dev_open,
        .release        = vhost_vsock_dev_release,
        .llseek         = noop_llseek,
        .unlocked_ioctl = vhost_vsock_dev_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
        .read_iter      = vhost_vsock_chr_read_iter,
        .write_iter     = vhost_vsock_chr_write_iter,
        .poll           = vhost_vsock_chr_poll,
};

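/* Userspace drives the device through /dev/vhost-vsock.  A minimal setup
 * sequence looks roughly like this (illustrative sketch only; error handling
 * and the per-virtqueue VHOST_SET_VRING_* calls are omitted):
 *
 *        int fd = open("/dev/vhost-vsock", O_RDWR);
 *        uint64_t features, cid = 3;   // example CID, any unreserved value
 *        int running = 1;
 *
 *        ioctl(fd, VHOST_SET_OWNER, NULL);
 *        ioctl(fd, VHOST_GET_FEATURES, &features);
 *        ioctl(fd, VHOST_SET_FEATURES, &features);    // or a subset
 *        // ... VHOST_SET_VRING_NUM/ADDR/BASE/KICK/CALL for each vq ...
 *        ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid);
 *        ioctl(fd, VHOST_VSOCK_SET_RUNNING, &running);
 */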
static struct miscdevice vhost_vsock_misc = {
        .minor = VHOST_VSOCK_MINOR,
        .name = "vhost-vsock",
        .fops = &vhost_vsock_fops,
};

static int __init vhost_vsock_init(void)
{
        int ret;

        ret = vsock_core_register(&vhost_transport.transport,
                                  VSOCK_TRANSPORT_F_H2G);
        if (ret < 0)
                return ret;

        ret = misc_register(&vhost_vsock_misc);
        if (ret) {
                vsock_core_unregister(&vhost_transport.transport);
                return ret;
        }

        return 0;
}

static void __exit vhost_vsock_exit(void)
{
        misc_deregister(&vhost_vsock_misc);
        vsock_core_unregister(&vhost_transport.transport);
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");