// SPDX-License-Identifier: GPL-2.0-only
/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others.
 */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small pkts.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES |
			       (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
			       (1ULL << VIRTIO_VSOCK_F_SEQPACKET)
};

enum {
	VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_MUTEX(vhost_vsock_mutex);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
	struct hlist_node hash;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
	bool seqpacket_allow;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

/* Callers that dereference the return value must hold vhost_vsock_mutex or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}
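
/* Deliver packets queued on vsock->send_pkt_list to the guest's rx
 * virtqueue. A packet larger than the available guest buffer is split
 * across several buffers, requeueing the remainder for the next iteration.
 * Runs with vq->mutex held; bounded by the vhost weight limits above.
 */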
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	int pkts = 0, total_len = 0;
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vhost_vq_get_backend(vq))
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	do {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t iov_len, payload_len;
		int head;
		u32 flags_to_restore = 0;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		iov_len = iov_length(&vq->iov[out], in);
		if (iov_len < sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
			break;
		}

		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
		payload_len = pkt->len - pkt->off;

		/* If the packet is greater than the space available in the
		 * buffer, we split it using multiple buffers.
		 */
		if (payload_len > iov_len - sizeof(pkt->hdr)) {
			payload_len = iov_len - sizeof(pkt->hdr);

			/* As we are copying pieces of a large packet's buffer
			 * to small rx buffers, headers of packets in the rx
			 * queue are created dynamically and are initialized
			 * with the header of the current packet (except
			 * length). But in case of SOCK_SEQPACKET, we also must
			 * clear the message delimiter bit
			 * (VIRTIO_VSOCK_SEQ_EOM) and the MSG_EOR bit
			 * (VIRTIO_VSOCK_SEQ_EOR) if set. Otherwise, there will
			 * be a sequence of packets with these bits set. After
			 * the initialized header has been copied to the rx
			 * buffer, these required bits are restored.
			 */
			if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
				pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
				flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;

				if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
					pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
					flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
				}
			}
		}

		/* Set the correct length in the header */
		pkt->hdr.len = cpu_to_le32(payload_len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
				      &iov_iter);
		if (nbytes != payload_len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		/* Deliver to monitoring devices all packets that we
		 * will transmit.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
		added = true;

		pkt->off += payload_len;
		total_len += payload_len;

		/* If we didn't send all the payload we can requeue the packet
		 * to send it with the next available buffer.
		 */
		if (pkt->off < pkt->len) {
			pkt->hdr.flags |= cpu_to_le32(flags_to_restore);

			/* We are queueing the same virtio_vsock_pkt to handle
			 * the remaining bytes, and we want to deliver it
			 * to monitoring devices in the next iteration.
			 */
			pkt->tap_delivered = false;

			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
		} else {
			if (pkt->reply) {
				int val;

				val = atomic_dec_return(&vsock->queued_replies);

				/* Do we have resources to resume tx
				 * processing?
				 */
				if (val + 1 == tx_vq->num)
					restart_tx = true;
			}

			virtio_transport_free_pkt(pkt);
		}
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}
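
/* .send_pkt callback: queue a host->guest packet on the destination
 * instance's send_pkt_list and kick the send worker. Returns the packet
 * length, or -ENODEV if no vhost_vsock owns the destination CID.
 */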
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		rcu_read_unlock();
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	rcu_read_unlock();
	return len;
}
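
/* .cancel_pkt callback: drop all not-yet-delivered packets queued for
 * @vsk, updating queued_replies and restarting tx processing if it had
 * been throttled waiting for reply space.
 */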
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	int ret = -ENODEV;
	LIST_HEAD(freeme);

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		goto out;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
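
/* Read one guest->host packet from a tx virtqueue descriptor chain into a
 * freshly allocated virtio_vsock_pkt. Returns NULL on malformed
 * descriptors, allocation failure, or an oversized payload.
 */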
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf_len = pkt->len;

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

static bool vhost_transport_seqpacket_allow(u32 remote_cid);
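
/* Transport ops exposed to the AF_VSOCK core: mostly the generic virtio
 * transport implementations, plus the vhost-specific CID lookup, packet
 * send/cancel, and SEQPACKET feature check defined in this file.
 */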
static struct virtio_transport vhost_transport = {
	.transport = {
		.module                   = THIS_MODULE,

		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.seqpacket_dequeue        = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue        = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow          = vhost_transport_seqpacket_allow,
		.seqpacket_has_data       = virtio_transport_seqpacket_has_data,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size       = virtio_transport_notify_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

static bool vhost_transport_seqpacket_allow(u32 remote_cid)
{
	struct vhost_vsock *vsock;
	bool seqpacket_allow = false;

	rcu_read_lock();
	vsock = vhost_vsock_get(remote_cid);

	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;

	rcu_read_unlock();

	return seqpacket_allow;
}
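
/* tx virtqueue kick handler (guest->host): pop descriptors, build packets,
 * and pass correctly addressed ones to the core virtio transport. Stops
 * early, leaving tx callbacks disabled, while too many replies are queued.
 */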
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head, pkts = 0, total_len = 0;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vhost_vq_get_backend(vq))
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	do {
		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies. Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		total_len += sizeof(pkt->hdr) + pkt->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
		    le64_to_cpu(pkt->hdr.dst_cid) ==
		    vhost_transport_get_local_cid())
			virtio_transport_recv_pkt(&vhost_transport, pkt);
		else
			virtio_transport_free_pkt(pkt);

		vhost_add_used(vq, head, 0);
		added = true;
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}
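
/* VHOST_VSOCK_SET_RUNNING(1): attach this instance as the backend of both
 * virtqueues so the kick handlers run, then kick the send worker for any
 * packets queued before start. All backends are cleared again on failure.
 */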
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vhost_vq_get_backend(vq)) {
			vhost_vq_set_backend(vq, vsock);
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	/* Some packets may have been queued before the device was started,
	 * let's kick the send worker to send them.
	 */
	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vhost_vq_set_backend(vq, NULL);
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
{
	size_t i;
	int ret = 0;

	mutex_lock(&vsock->dev.mutex);

	if (check_owner) {
		ret = vhost_dev_check_owner(&vsock->dev);
		if (ret)
			goto err;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}
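
/* Open of the vhost-vsock misc device: allocate an instance, wire up the
 * tx/rx kick handlers, and initialize the host->guest send queue. No guest
 * CID is assigned until VHOST_VSOCK_SET_GUEST_CID.
 */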
static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->guest_cid = 0; /* no CID assigned yet */

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
		       VHOST_VSOCK_WEIGHT, true, NULL);

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	vhost_dev_flush(&vsock->dev);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either. At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	/* If the peer is still valid, no need to reset connection */
	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
		return;

	/* If the close timeout is pending, let it expire. This avoids races
	 * with the timeout callback.
	 */
	if (vsk->close_work_scheduled)
		return;

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = ECONNRESET;
	sk_error_report(sk);
}
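
/* Release path: unpublish the instance from the hash table, reset sockets
 * orphaned by its disappearance, stop the device, and free any packets
 * still pending on the send queue.
 */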
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	mutex_lock(&vhost_vsock_mutex);
	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);
	mutex_unlock(&vhost_vsock_mutex);

	/* Wait for other CPUs to finish using vsock */
	synchronize_rcu();

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient. Room for improvement here.
	 */
	vsock_for_each_connected_socket(&vhost_transport.transport,
					vhost_vsock_reset_orphans);

	/* Don't check the owner, because we are in the release path, so we
	 * need to stop the vsock device in any case.
	 * vhost_vsock_stop() can not fail in this case, so we don't need to
	 * check the return code.
	 */
	vhost_vsock_stop(vsock, false);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}
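
/* VHOST_VSOCK_SET_GUEST_CID: validate the requested CID and publish it in
 * the global hash table. Reserved CIDs (<= VMADDR_CID_HOST), 64-bit CIDs,
 * and CIDs already claimed elsewhere are refused.
 */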
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is assigned to the guest->host transport (i.e. nested
	 * VM), to make the loopback work.
	 */
	if (vsock_find_cid(guest_cid))
		return -EADDRINUSE;

	/* Refuse if CID is already in use */
	mutex_lock(&vhost_vsock_mutex);
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		mutex_unlock(&vhost_vsock_mutex);
		return -EADDRINUSE;
	}

	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);

	vsock->guest_cid = guest_cid;
	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
	mutex_unlock(&vhost_vsock_mutex);

	return 0;
}
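
/* VHOST_SET_FEATURES: ack the negotiated feature bits on every virtqueue,
 * setting up the device IOTLB for VIRTIO_F_ACCESS_PLATFORM and recording
 * whether SEQPACKET was negotiated.
 */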
static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		goto err;
	}

	if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
		if (vhost_init_device_iotlb(&vsock->dev, true))
			goto err;
	}

	if (features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET))
		vsock->seqpacket_allow = true;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;

err:
	mutex_unlock(&vsock->dev.mutex);
	return -EFAULT;
}

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock, true);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VSOCK_BACKEND_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_BACKEND_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&vsock->dev, features);
		return 0;
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
					  struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;

	return vhost_chr_write_iter(dev, from);
}

static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;

	return vhost_chr_poll(file, dev, wait);
}

static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek         = noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.read_iter      = vhost_vsock_chr_read_iter,
	.write_iter     = vhost_vsock_chr_write_iter,
	.poll           = vhost_vsock_chr_poll,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_register(&vhost_transport.transport,
				  VSOCK_TRANSPORT_F_H2G);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_unregister(&vhost_transport.transport);
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");