// SPDX-License-Identifier: GPL-2.0-only
/*
 * virtio transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
 * early virtio-vsock proof-of-concept bits.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <net/sock.h>
#include <linux/mutex.h>
#include <net/af_vsock.h>

static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock __rcu *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */
static struct virtio_transport virtio_transport; /* forward declaration */

struct virtio_vsock {
	struct virtio_device *vdev;
	struct virtqueue *vqs[VSOCK_VQ_MAX];

	/* Virtqueue processing is deferred to a workqueue */
	struct work_struct tx_work;
	struct work_struct rx_work;
	struct work_struct event_work;

	/* The following fields are protected by tx_lock.  vqs[VSOCK_VQ_TX]
	 * must be accessed with tx_lock held.
	 */
	struct mutex tx_lock;
	bool tx_run;

	struct work_struct send_pkt_work;
	struct sk_buff_head send_pkt_queue;

	atomic_t queued_replies;

	/* The following fields are protected by rx_lock.  vqs[VSOCK_VQ_RX]
	 * must be accessed with rx_lock held.
	 */
	struct mutex rx_lock;
	bool rx_run;
	int rx_buf_nr;
	int rx_buf_max_nr;

	/* The following fields are protected by event_lock.
	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
	 */
	struct mutex event_lock;
	bool event_run;
	struct virtio_vsock_event event_list[8];

	u32 guest_cid;
	bool seqpacket_allow;
};

static u32 virtio_transport_get_local_cid(void)
{
	struct virtio_vsock *vsock;
	u32 ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = VMADDR_CID_ANY;
		goto out_rcu;
	}

	ret = vsock->guest_cid;
out_rcu:
	rcu_read_unlock();
	return ret;
}

static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, send_pkt_work);
	struct virtqueue *vq;
	bool added = false;
	bool restart_rx = false;

	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	vq = vsock->vqs[VSOCK_VQ_TX];

	for (;;) {
		struct scatterlist hdr, buf, *sgs[2];
		int ret, in_sg = 0, out_sg = 0;
		struct sk_buff *skb;
		bool reply;

		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
		if (!skb)
			break;

		virtio_transport_deliver_tap_pkt(skb);
		reply = virtio_vsock_skb_reply(skb);

		sg_init_one(&hdr, virtio_vsock_hdr(skb), sizeof(*virtio_vsock_hdr(skb)));
		sgs[out_sg++] = &hdr;
		if (skb->len > 0) {
			sg_init_one(&buf, skb->data, skb->len);
			sgs[out_sg++] = &buf;
		}

		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
		/* Usually this means that there is no more space available in
		 * the vq
		 */
		if (ret < 0) {
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			break;
		}

		if (reply) {
			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we now have resources to resume rx processing? */
			if (val + 1 == virtqueue_get_vring_size(rx_vq))
				restart_rx = true;
		}

		added = true;
	}

	if (added)
		virtqueue_kick(vq);

out:
	mutex_unlock(&vsock->tx_lock);

	if (restart_rx)
		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

static int
virtio_transport_send_pkt(struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr;
	struct virtio_vsock *vsock;
	int len = skb->len;

	hdr = virtio_vsock_hdr(skb);

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	if (le64_to_cpu(hdr->dst_cid) == vsock->guest_cid) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	if (virtio_vsock_skb_reply(skb))
		atomic_inc(&vsock->queued_replies);

	virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);

out_rcu:
	rcu_read_unlock();
	return len;
}

static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct virtio_vsock *vsock;
	int cnt = 0, ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = -ENODEV;
		goto out_rcu;
	}

	cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);

	if (cnt) {
		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
		    new_cnt < virtqueue_get_vring_size(rx_vq))
			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
	}

	ret = 0;

out_rcu:
	rcu_read_unlock();
	return ret;
}

static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
	int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM;
	struct scatterlist pkt, *p;
	struct virtqueue *vq;
	struct sk_buff *skb;
	int ret;

	vq = vsock->vqs[VSOCK_VQ_RX];

	do {
		skb = virtio_vsock_alloc_skb(total_len, GFP_KERNEL);
		if (!skb)
			break;

		memset(skb->head, 0, VIRTIO_VSOCK_SKB_HEADROOM);
		sg_init_one(&pkt, virtio_vsock_hdr(skb), total_len);
		p = &pkt;
		ret = virtqueue_add_sgs(vq, &p, 0, 1, skb, GFP_KERNEL);
		if (ret < 0) {
			kfree_skb(skb);
			break;
		}

		vsock->rx_buf_nr++;
	} while (vq->num_free);
	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
		vsock->rx_buf_max_nr = vsock->rx_buf_nr;
	virtqueue_kick(vq);
}

static void virtio_transport_tx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);
	struct virtqueue *vq;
	bool added = false;

	vq = vsock->vqs[VSOCK_VQ_TX];
	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	do {
		struct sk_buff *skb;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((skb = virtqueue_get_buf(vq, &len)) != NULL) {
			consume_skb(skb);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));

out:
	mutex_unlock(&vsock->tx_lock);

	if (added)
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

/* Is there space left for replies to rx packets? */
static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < virtqueue_get_vring_size(vq);
}

/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
				       struct virtio_vsock_event *event)
{
	struct scatterlist sg;
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	sg_init_one(&sg, event, sizeof(*event));

	return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
}

/* event_lock must be held */
static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
		struct virtio_vsock_event *event = &vsock->event_list[i];

		virtio_vsock_event_fill_one(vsock, event);
	}

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
}

static void virtio_vsock_reset_sock(struct sock *sk)
{
	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	sk->sk_state = TCP_CLOSE;
	sk->sk_err = ECONNRESET;
	sk_error_report(sk);
}

static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	__le64 guest_cid;

	vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
			  &guest_cid, sizeof(guest_cid));
	vsock->guest_cid = le64_to_cpu(guest_cid);
}

/* event_lock must be held */
static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
				      struct virtio_vsock_event *event)
{
	switch (le32_to_cpu(event->id)) {
	case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
		virtio_vsock_update_guest_cid(vsock);
		vsock_for_each_connected_socket(&virtio_transport.transport,
						virtio_vsock_reset_sock);
		break;
	}
}

static void virtio_transport_event_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, event_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	mutex_lock(&vsock->event_lock);

	if (!vsock->event_run)
		goto out;

	do {
		struct virtio_vsock_event *event;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
			if (len == sizeof(*event))
				virtio_vsock_event_handle(vsock, event);

			virtio_vsock_event_fill_one(vsock, event);
		}
	} while (!virtqueue_enable_cb(vq));

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
out:
	mutex_unlock(&vsock->event_lock);
}

static void virtio_vsock_event_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->event_work);
}

static void virtio_vsock_tx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->tx_work);
}

static void virtio_vsock_rx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

static bool virtio_transport_seqpacket_allow(u32 remote_cid);

static struct virtio_transport virtio_transport = {
	.transport = {
		.module                   = THIS_MODULE,

		.get_local_cid            = virtio_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = virtio_transport_cancel_pkt,

		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.seqpacket_dequeue        = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue        = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow          = virtio_transport_seqpacket_allow,
		.seqpacket_has_data       = virtio_transport_seqpacket_has_data,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size       = virtio_transport_notify_buffer_size,

		.read_skb                 = virtio_transport_read_skb,
	},

	.send_pkt = virtio_transport_send_pkt,
};

static bool virtio_transport_seqpacket_allow(u32 remote_cid)
{
	struct virtio_vsock *vsock;
	bool seqpacket_allow;

	seqpacket_allow = false;
	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;
	rcu_read_unlock();

	return seqpacket_allow;
}

static void virtio_transport_rx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, rx_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_RX];

	mutex_lock(&vsock->rx_lock);

	if (!vsock->rx_run)
		goto out;

	do {
		virtqueue_disable_cb(vq);
		for (;;) {
			struct sk_buff *skb;
			unsigned int len;

			if (!virtio_transport_more_replies(vsock)) {
				/* Stop rx until the device processes already
				 * pending replies.  Leave rx virtqueue
				 * callbacks disabled.
				 */
				goto out;
			}

			skb = virtqueue_get_buf(vq, &len);
			if (!skb)
				break;

			vsock->rx_buf_nr--;

			/* Drop short/long packets */
			if (unlikely(len < sizeof(struct virtio_vsock_hdr) ||
				     len > virtio_vsock_skb_len(skb))) {
				kfree_skb(skb);
				continue;
			}

			virtio_vsock_skb_rx_put(skb);
			virtio_transport_deliver_tap_pkt(skb);
			virtio_transport_recv_pkt(&virtio_transport, skb);
		}
	} while (!virtqueue_enable_cb(vq));

out:
	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
		virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);
}

static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	static const char * const names[] = {
		"rx",
		"tx",
		"event",
	};
	vq_callback_t *callbacks[] = {
		virtio_vsock_rx_done,
		virtio_vsock_tx_done,
		virtio_vsock_event_done,
	};
	int ret;

	ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, callbacks, names,
			      NULL);
	if (ret < 0)
		return ret;

	virtio_vsock_update_guest_cid(vsock);

	virtio_device_ready(vdev);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = true;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->rx_lock);
	virtio_vsock_rx_fill(vsock);
	vsock->rx_run = true;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->event_lock);
	virtio_vsock_event_fill(vsock);
	vsock->event_run = true;
	mutex_unlock(&vsock->event_lock);

	return 0;
}

static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	struct sk_buff *skb;

	/* Reset all connected sockets when the VQs disappear */
	vsock_for_each_connected_socket(&virtio_transport.transport,
					virtio_vsock_reset_sock);

	/* Stop all work handlers to make sure no one is accessing the device,
	 * so we can safely call virtio_reset_device().
	 */
	mutex_lock(&vsock->rx_lock);
	vsock->rx_run = false;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = false;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->event_lock);
	vsock->event_run = false;
	mutex_unlock(&vsock->event_lock);

	/* Flush all device writes and interrupts, device will not use any
	 * more buffers.
	 */
	virtio_reset_device(vdev);

	mutex_lock(&vsock->rx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->tx_lock);

	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);

	/* Delete virtqueues and flush outstanding callbacks if any */
	vdev->config->del_vqs(vdev);
}

static int virtio_vsock_probe(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = NULL;
	int ret;

	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
	if (ret)
		return ret;

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
	if (!vsock) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->vdev = vdev;

	vsock->rx_buf_nr = 0;
	vsock->rx_buf_max_nr = 0;
	atomic_set(&vsock->queued_replies, 0);

	mutex_init(&vsock->tx_lock);
	mutex_init(&vsock->rx_lock);
	mutex_init(&vsock->event_lock);
	skb_queue_head_init(&vsock->send_pkt_queue);
	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);

	if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
		vsock->seqpacket_allow = true;

	vdev->priv = vsock;

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(the_virtio_vsock, vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;

out:
	kfree(vsock);
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}

static void virtio_vsock_remove(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	vdev->priv = NULL;
	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	/* Other works can be queued before 'config->del_vqs()', so flush all
	 * works before freeing the vsock object to avoid use after free.
	 */
	flush_work(&vsock->rx_work);
	flush_work(&vsock->tx_work);
	flush_work(&vsock->event_work);
	flush_work(&vsock->send_pkt_work);

	mutex_unlock(&the_virtio_vsock_mutex);

	kfree(vsock);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_vsock_freeze(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;
}

static int virtio_vsock_restore(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;
	int ret;

	mutex_lock(&the_virtio_vsock_mutex);

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(the_virtio_vsock, vsock);

out:
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_VSOCK_F_SEQPACKET
};

static struct virtio_driver virtio_vsock_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtio_vsock_probe,
	.remove = virtio_vsock_remove,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtio_vsock_freeze,
	.restore = virtio_vsock_restore,
#endif
};

static int __init virtio_vsock_init(void)
{
	int ret;

	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
	if (!virtio_vsock_workqueue)
		return -ENOMEM;

	ret = vsock_core_register(&virtio_transport.transport,
				  VSOCK_TRANSPORT_F_G2H);
	if (ret)
		goto out_wq;

	ret = register_virtio_driver(&virtio_vsock_driver);
	if (ret)
		goto out_vci;

	return 0;

out_vci:
	vsock_core_unregister(&virtio_transport.transport);
out_wq:
	destroy_workqueue(virtio_vsock_workqueue);
	return ret;
}

static void __exit virtio_vsock_exit(void)
{
	unregister_virtio_driver(&virtio_vsock_driver);
	vsock_core_unregister(&virtio_transport.transport);
	destroy_workqueue(virtio_vsock_workqueue);
}

module_init(virtio_vsock_init);
module_exit(virtio_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("virtio transport for vsock");
MODULE_DEVICE_TABLE(virtio, id_table);