drivers/vhost/vsock.c  [linux-2.6-block.git]
// SPDX-License-Identifier: GPL-2.0-only
/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small pkts.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES |
			       (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
			       (1ULL << VIRTIO_VSOCK_F_SEQPACKET)
};

enum {
	VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_MUTEX(vhost_vsock_mutex);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
	struct hlist_node hash;

	struct vhost_work send_pkt_work;
	struct sk_buff_head send_pkt_queue; /* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
	bool seqpacket_allow;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

/* Callers that dereference the return value must hold vhost_vsock_mutex or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;

	}

	return NULL;
}

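/* Worker body for host->guest delivery: drain vsock->send_pkt_queue into the
 * guest's RX virtqueue.  Each skb is copied as a virtio_vsock header followed
 * by its payload; packets larger than the available buffer are split across
 * multiple buffers and requeued until fully sent.
 */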
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	int pkts = 0, total_len = 0;
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vhost_vq_get_backend(vq))
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	do {
		struct virtio_vsock_hdr *hdr;
		size_t iov_len, payload_len;
		struct iov_iter iov_iter;
		u32 flags_to_restore = 0;
		struct sk_buff *skb;
		unsigned out, in;
		size_t nbytes;
		int head;

		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);

		if (!skb) {
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			break;
		}

		if (head == vq->num) {
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			kfree_skb(skb);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		iov_len = iov_length(&vq->iov[out], in);
		if (iov_len < sizeof(*hdr)) {
			kfree_skb(skb);
			vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
			break;
		}

		iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len);
		payload_len = skb->len;
		hdr = virtio_vsock_hdr(skb);

		/* If the packet is greater than the space available in the
		 * buffer, we split it using multiple buffers.
		 */
		if (payload_len > iov_len - sizeof(*hdr)) {
			payload_len = iov_len - sizeof(*hdr);

			/* As we are copying pieces of a large packet's buffer
			 * into small rx buffers, the headers of the packets in
			 * the rx queue are created dynamically and initialized
			 * from the header of the current packet (except for
			 * the length). But in the case of SOCK_SEQPACKET, we
			 * must also clear the message delimiter bit
			 * (VIRTIO_VSOCK_SEQ_EOM) and the MSG_EOR bit
			 * (VIRTIO_VSOCK_SEQ_EOR) if set. Otherwise, there
			 * would be a sequence of packets with these bits set.
			 * After the initialized header has been copied to the
			 * rx buffer, the required bits are restored.
			 */
			if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
				hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
				flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;

				if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR) {
					hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
					flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
				}
			}
		}

		/* Set the correct length in the header */
		hdr->len = cpu_to_le32(payload_len);

		nbytes = copy_to_iter(hdr, sizeof(*hdr), &iov_iter);
		if (nbytes != sizeof(*hdr)) {
			kfree_skb(skb);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(skb->data, payload_len, &iov_iter);
		if (nbytes != payload_len) {
			kfree_skb(skb);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		/* Deliver to monitoring devices all packets that we
		 * will transmit.
		 */
		virtio_transport_deliver_tap_pkt(skb);

		vhost_add_used(vq, head, sizeof(*hdr) + payload_len);
		added = true;

		skb_pull(skb, payload_len);
		total_len += payload_len;

		/* If we didn't send all the payload we can requeue the packet
		 * to send it with the next available buffer.
		 */
		if (skb->len > 0) {
			hdr->flags |= cpu_to_le32(flags_to_restore);

			/* We are queueing the same skb to handle
			 * the remaining bytes, and we want to deliver it
			 * to monitoring devices in the next iteration.
			 */
			virtio_vsock_skb_clear_tap_delivered(skb);
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
		} else {
			if (virtio_vsock_skb_reply(skb)) {
				int val;

				val = atomic_dec_return(&vsock->queued_replies);

				/* Do we have resources to resume tx
				 * processing?
				 */
				if (val + 1 == tx_vq->num)
					restart_tx = true;
			}

			consume_skb(skb);
		}
	} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

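/* Queue a packet for delivery to the guest selected by hdr->dst_cid and kick
 * the send worker.  Returns the queued payload length, or -ENODEV if no
 * vhost_vsock instance owns that CID.
 */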
static int
vhost_transport_send_pkt(struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vhost_vsock *vsock;
	int len = skb->len;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid));
	if (!vsock) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -ENODEV;
	}

	if (virtio_vsock_skb_reply(skb))
		atomic_inc(&vsock->queued_replies);

	virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	rcu_read_unlock();
	return len;
}

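/* Remove all packets queued for @vsk that have not been sent yet.  If purging
 * pending replies frees enough space in the TX virtqueue, TX processing is
 * resumed.
 */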
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	int cnt = 0;
	int ret = -ENODEV;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		goto out;

	cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

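/* Build an skb from a guest TX descriptor chain: copy the virtio_vsock header
 * and, if present, the payload from the guest buffers.  Returns NULL on
 * malformed input (unexpected writable buffers, truncated header, or an
 * oversized payload).
 */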
static struct sk_buff *
vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_hdr *hdr;
	struct iov_iter iov_iter;
	struct sk_buff *skb;
	size_t payload_len;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	len = iov_length(vq->iov, out);

	/* len contains both payload and hdr */
	skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return NULL;

	iov_iter_init(&iov_iter, ITER_SOURCE, vq->iov, out, len);

	hdr = virtio_vsock_hdr(skb);
	nbytes = copy_from_iter(hdr, sizeof(*hdr), &iov_iter);
	if (nbytes != sizeof(*hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(*hdr), nbytes);
		kfree_skb(skb);
		return NULL;
	}

	payload_len = le32_to_cpu(hdr->len);

	/* No payload */
	if (!payload_len)
		return skb;

	/* The pkt is too big or the length in the header is invalid */
	if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE ||
	    payload_len + sizeof(*hdr) > len) {
		kfree_skb(skb);
		return NULL;
	}

	virtio_vsock_skb_rx_put(skb);

	nbytes = copy_from_iter(skb->data, payload_len, &iov_iter);
	if (nbytes != payload_len) {
		vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
		       payload_len, nbytes);
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

static bool vhost_transport_seqpacket_allow(u32 remote_cid);

static struct virtio_transport vhost_transport = {
	.transport = {
		.module = THIS_MODULE,

		.get_local_cid = vhost_transport_get_local_cid,

		.init = virtio_transport_do_socket_init,
		.destruct = virtio_transport_destruct,
		.release = virtio_transport_release,
		.connect = virtio_transport_connect,
		.shutdown = virtio_transport_shutdown,
		.cancel_pkt = vhost_transport_cancel_pkt,

		.dgram_enqueue = virtio_transport_dgram_enqueue,
		.dgram_dequeue = virtio_transport_dgram_dequeue,
		.dgram_bind = virtio_transport_dgram_bind,
		.dgram_allow = virtio_transport_dgram_allow,

		.stream_enqueue = virtio_transport_stream_enqueue,
		.stream_dequeue = virtio_transport_stream_dequeue,
		.stream_has_data = virtio_transport_stream_has_data,
		.stream_has_space = virtio_transport_stream_has_space,
		.stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
		.stream_is_active = virtio_transport_stream_is_active,
		.stream_allow = virtio_transport_stream_allow,

		.seqpacket_dequeue = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow = vhost_transport_seqpacket_allow,
		.seqpacket_has_data = virtio_transport_seqpacket_has_data,

		.notify_poll_in = virtio_transport_notify_poll_in,
		.notify_poll_out = virtio_transport_notify_poll_out,
		.notify_recv_init = virtio_transport_notify_recv_init,
		.notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init = virtio_transport_notify_send_init,
		.notify_send_pre_block = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size = virtio_transport_notify_buffer_size,

	},

	.send_pkt = vhost_transport_send_pkt,
};

static bool vhost_transport_seqpacket_allow(u32 remote_cid)
{
	struct vhost_vsock *vsock;
	bool seqpacket_allow = false;

	rcu_read_lock();
	vsock = vhost_vsock_get(remote_cid);

	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;

	rcu_read_unlock();

	return seqpacket_allow;
}

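/* TX virtqueue kick handler: receive packets sent by the guest, deliver them
 * to monitoring devices and, if they are correctly addressed, to the core
 * virtio transport.  Processing stops while no space is left for replies.
 */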
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	int head, pkts = 0, total_len = 0;
	unsigned int out, in;
	struct sk_buff *skb;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vhost_vq_get_backend(vq))
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	do {
		struct virtio_vsock_hdr *hdr;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies. Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		skb = vhost_vsock_alloc_skb(vq, out, in);
		if (!skb) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		total_len += sizeof(*hdr) + skb->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(skb);

		hdr = virtio_vsock_hdr(skb);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid &&
		    le64_to_cpu(hdr->dst_cid) ==
		    vhost_transport_get_local_cid())
			virtio_transport_recv_pkt(&vhost_transport, skb);
		else
			kfree_skb(skb);

		vhost_add_used(vq, head, 0);
		added = true;
	} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

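/* VHOST_VSOCK_SET_RUNNING (start): attach this instance as the backend of
 * both virtqueues and kick the send worker in case packets were queued before
 * the device was started.
 */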
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vhost_vq_get_backend(vq)) {
			vhost_vq_set_backend(vq, vsock);
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	/* Some packets may have been queued before the device was started,
	 * let's kick the send worker to send them.
	 */
	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vhost_vq_set_backend(vq, NULL);
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

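/* Detach the backend from both virtqueues.  @check_owner is false only on the
 * release path, where stopping must always succeed.
 */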
static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
{
	size_t i;
	int ret = 0;

	mutex_lock(&vsock->dev.mutex);

	if (check_owner) {
		ret = vhost_dev_check_owner(&vsock->dev);
		if (ret)
			goto err;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->guest_cid = 0; /* no CID assigned yet */

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
		       VHOST_VSOCK_WEIGHT, true, NULL);

	file->private_data = vsock;
	skb_queue_head_init(&vsock->send_pkt_queue);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	vhost_dev_flush(&vsock->dev);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either. At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	/* If the peer is still valid, no need to reset connection */
	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
		return;

	/* If the close timeout is pending, let it expire. This avoids races
	 * with the timeout callback.
	 */
	if (vsk->close_work_scheduled)
		return;

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = ECONNRESET;
	sk_error_report(sk);
}

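/* Release path: unpublish the CID, wait for RCU readers to finish, reset
 * orphaned sockets, stop the device and free every queued packet.
 */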
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	mutex_lock(&vhost_vsock_mutex);
	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);
	mutex_unlock(&vhost_vsock_mutex);

	/* Wait for other CPUs to finish using vsock */
	synchronize_rcu();

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here. */
	vsock_for_each_connected_socket(&vhost_transport.transport,
					vhost_vsock_reset_orphans);

	/* Don't check the owner, because we are in the release path, so we
	 * need to stop the vsock device in any case.
	 * vhost_vsock_stop() can not fail in this case, so we don't need to
	 * check the return code.
	 */
	vhost_vsock_stop(vsock, false);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);

	vhost_dev_cleanup(&vsock->dev);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

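/* VHOST_VSOCK_SET_GUEST_CID: validate the requested CID and publish this
 * instance in vhost_vsock_hash so that host->guest lookups can find it.
 */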
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is assigned to the guest->host transport (i.e. nested
	 * VM), to make the loopback work.
	 */
	if (vsock_find_cid(guest_cid))
		return -EADDRINUSE;

	/* Refuse if CID is already in use */
	mutex_lock(&vhost_vsock_mutex);
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		mutex_unlock(&vhost_vsock_mutex);
		return -EADDRINUSE;
	}

	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);

	vsock->guest_cid = guest_cid;
	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
	mutex_unlock(&vhost_vsock_mutex);

	return 0;
}

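/* VHOST_SET_FEATURES: reject unknown bits, set up the device IOTLB when
 * VIRTIO_F_ACCESS_PLATFORM is negotiated and propagate the acked features to
 * both virtqueues.
 */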
static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		goto err;
	}

	if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
		if (vhost_init_device_iotlb(&vsock->dev))
			goto err;
	}

	if (features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET))
		vsock->seqpacket_allow = true;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;

err:
	mutex_unlock(&vsock->dev.mutex);
	return -EFAULT;
}

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock, true);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VSOCK_BACKEND_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_BACKEND_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&vsock->dev, features);
		return 0;
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
					  struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;

	return vhost_chr_write_iter(dev, from);
}

static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;

	return vhost_chr_poll(file, dev, wait);
}

static const struct file_operations vhost_vsock_fops = {
	.owner = THIS_MODULE,
	.open = vhost_vsock_dev_open,
	.release = vhost_vsock_dev_release,
	.llseek = noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.read_iter = vhost_vsock_chr_read_iter,
	.write_iter = vhost_vsock_chr_write_iter,
	.poll = vhost_vsock_chr_poll,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_register(&vhost_transport.transport,
				  VSOCK_TRANSPORT_F_H2G);
	if (ret < 0)
		return ret;

	ret = misc_register(&vhost_vsock_misc);
	if (ret) {
		vsock_core_unregister(&vhost_transport.transport);
		return ret;
	}

	return 0;
};

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_unregister(&vhost_transport.transport);
};

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock ");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");