// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/xdp.h>

#include "vhost.h"

static int experimental_zcopytx = 0;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
		 " 1 -Enable; 0 - Disable");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with small
 * pkts.
 */
#define VHOST_NET_PKT_WEIGHT 256

/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256

/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN	((__force __virtio32)3)
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN	((__force __virtio32)2)
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS	((__force __virtio32)1)
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN	((__force __virtio32)0)

#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)

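/* Note: both VHOST_DMA_DONE_LEN (2) and VHOST_DMA_FAILED_LEN (3) satisfy
 * VHOST_DMA_IS_DONE(), so a completed-but-failed DMA still lets
 * vhost_zerocopy_signal_used() below reclaim the entry; the failure is
 * only counted separately via vhost_net_tx_err().
 */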
enum {
	VHOST_NET_FEATURES = VHOST_FEATURES |
			     (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
			     (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			     (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
			     (1ULL << VIRTIO_F_RING_RESET)
};

enum {
	VHOST_NET_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

struct vhost_net_ubuf_ref {
	/* refcount follows semantics similar to kref:
	 *  0: object is released
	 *  1: no outstanding ubufs
	 * >1: outstanding ubufs
	 */
	atomic_t refcount;
	wait_queue_head_t wait;
	struct vhost_virtqueue *vq;
};

#define VHOST_NET_BATCH 64
struct vhost_net_buf {
	void **queue;
	int tail;
	int head;
};

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;
	size_t vhost_hlen;
	size_t sock_hlen;
	/* vhost zerocopy support fields below: */
	/* last used idx for outstanding DMA zerocopy buffers */
	int upend_idx;
	/* For TX, first used idx for DMA done zerocopy buffers
	 * For RX, number of batched heads
	 */
	int done_idx;
	/* Number of XDP frames batched */
	int batched_xdp;
	/* an array of userspace buffers info */
	struct ubuf_info_msgzc *ubuf_info;
	/* Reference counting for outstanding ubufs.
	 * Protected by vq mutex. Writers must also take device mutex. */
	struct vhost_net_ubuf_ref *ubufs;
	struct ptr_ring *rx_ring;
	struct vhost_net_buf rxq;
	/* Batched XDP buffs */
	struct xdp_buff *xdp;
};

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Number of TX recently submitted.
	 * Protected by tx vq lock. */
	unsigned tx_packets;
	/* Number of times zerocopy TX recently failed.
	 * Protected by tx vq lock. */
	unsigned tx_zcopy_err;
	/* Flush in progress. Protected by tx vq lock. */
	bool tx_flush;
	/* Private page frag */
	struct page_frag page_frag;
	/* Refcount bias of page frag */
	int refcnt_bias;
};

static unsigned vhost_net_zcopy_mask __read_mostly;

static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
{
	if (rxq->tail != rxq->head)
		return rxq->queue[rxq->head];
	else
		return NULL;
}

static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
{
	return rxq->tail - rxq->head;
}

static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
{
	return rxq->tail == rxq->head;
}

static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
{
	void *ret = vhost_net_buf_get_ptr(rxq);
	++rxq->head;
	return ret;
}

static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	rxq->head = 0;
	rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
					     VHOST_NET_BATCH);
	return rxq->tail;
}

static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
		ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
				   vhost_net_buf_get_size(rxq),
				   tun_ptr_free);
		rxq->head = rxq->tail = 0;
	}
}

static int vhost_net_buf_peek_len(void *ptr)
{
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		return xdpf->len;
	}

	return __skb_array_len_with_tag(ptr);
}

static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (!vhost_net_buf_is_empty(rxq))
		goto out;

	if (!vhost_net_buf_produce(nvq))
		return 0;

out:
	return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
}

static void vhost_net_buf_init(struct vhost_net_buf *rxq)
{
	rxq->head = rxq->tail = 0;
}

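/* The helpers above treat rxq as a simple head/tail window over up to
 * VHOST_NET_BATCH pointers pulled from the backend's ptr_ring in one go;
 * vhost_net_buf_unproduce() pushes any unconsumed pointers back when the
 * backend is torn down or replaced.
 */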
static void vhost_net_enable_zcopy(int vq)
{
	vhost_net_zcopy_mask |= 0x1 << vq;
}

static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
	struct vhost_net_ubuf_ref *ubufs;
	/* No zero copy backend? Nothing to count. */
	if (!zcopy)
		return NULL;
	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
	if (!ubufs)
		return ERR_PTR(-ENOMEM);
	atomic_set(&ubufs->refcount, 1);
	init_waitqueue_head(&ubufs->wait);
	ubufs->vq = vq;
	return ubufs;
}

static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
	int r = atomic_sub_return(1, &ubufs->refcount);
	if (unlikely(!r))
		wake_up(&ubufs->wait);
	return r;
}

static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put(ubufs);
	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}

static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put_and_wait(ubufs);
	kfree(ubufs);
}

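/* Per the kref-like scheme documented in struct vhost_net_ubuf_ref: the
 * structure starts at refcount 1 (no outstanding ubufs), each in-flight
 * zerocopy skb holds one extra reference, and put_and_wait() drops the
 * base reference and then sleeps until the count reaches zero.
 */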
static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		kfree(n->vqs[i].ubuf_info);
		n->vqs[i].ubuf_info = NULL;
	}
}

static int vhost_net_set_ubuf_info(struct vhost_net *n)
{
	bool zcopy;
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		zcopy = vhost_net_zcopy_mask & (0x1 << i);
		if (!zcopy)
			continue;
		n->vqs[i].ubuf_info =
			kmalloc_array(UIO_MAXIOV,
				      sizeof(*n->vqs[i].ubuf_info),
				      GFP_KERNEL);
		if (!n->vqs[i].ubuf_info)
			goto err;
	}
	return 0;

err:
	vhost_net_clear_ubuf_info(n);
	return -ENOMEM;
}

static void vhost_net_vq_reset(struct vhost_net *n)
{
	int i;

	vhost_net_clear_ubuf_info(n);

	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].done_idx = 0;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].ubufs = NULL;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}

}

static void vhost_net_tx_packet(struct vhost_net *net)
{
	++net->tx_packets;
	if (net->tx_packets < 1024)
		return;
	net->tx_packets = 0;
	net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
	++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
	/* TX flush waits for outstanding DMAs to be done.
	 * Don't start new DMAs.
	 */
	return !net->tx_flush &&
		net->tx_packets / 64 >= net->tx_zcopy_err;
}

static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

static bool vhost_sock_xdp(struct socket *sock)
{
	return sock_flag(sock->sk, SOCK_XDP);
}

/* The lower device driver may complete DMAs out of order. upend_idx tracks
 * the end of the used idx window, done_idx tracks its head. Once the lower
 * device has completed a contiguous run of DMAs, we signal the used idx to
 * the KVM guest.
 */
static void vhost_zerocopy_signal_used(struct vhost_net *net,
				       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	int i, add;
	int j = 0;

	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
			vhost_net_tx_err(net);
		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			++j;
		} else
			break;
	}
	while (j) {
		add = min(UIO_MAXIOV - nvq->done_idx, j);
		vhost_add_used_and_signal_n(vq->dev, vq,
					    &vq->heads[nvq->done_idx], add);
		nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
		j -= add;
	}
}

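/* heads[] is used as a circular buffer of UIO_MAXIOV entries, so the
 * while (j) loop above may need two vhost_add_used_and_signal_n() calls
 * when the completed [done_idx, done_idx + j) window wraps past the end
 * of the array.
 */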
static void vhost_zerocopy_callback(struct sk_buff *skb,
				    struct ubuf_info *ubuf_base, bool success)
{
	struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt;

	rcu_read_lock_bh();

	/* set len to mark this desc buffers done DMA */
	vq->heads[ubuf->desc].len = success ?
		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
	cnt = vhost_net_ubuf_put(ubufs);

	/*
	 * Trigger polling thread if guest stopped submitting new buffers:
	 * in this case, the refcount after decrement will eventually reach 1.
	 * We also trigger polling periodically after each 16 packets
	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
	 * less than 10% of times).
	 */
	if (cnt <= 1 || !(cnt % 16))
		vhost_poll_queue(&vq->poll);

	rcu_read_unlock_bh();
}

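/* The rcu_read_lock_bh() section above pairs with the synchronize_rcu()
 * in vhost_net_release() ("Make sure no callbacks are outstanding"), so
 * the structures this callback touches are not freed from under it.
 */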
static inline unsigned long busy_clock(void)
{
	return local_clock() >> 10;
}

static bool vhost_can_busy_poll(unsigned long endtime)
{
	return likely(!need_resched() && !time_after(busy_clock(), endtime) &&
		      !signal_pending(current));
}

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	if (!vhost_vq_get_backend(vq))
		return;
	vhost_poll_stop(poll);
}

static int vhost_net_enable_vq(struct vhost_net *n,
			       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	struct socket *sock;

	sock = vhost_vq_get_backend(vq);
	if (!sock)
		return 0;

	return vhost_poll_start(poll, sock->file);
}

static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_dev *dev = vq->dev;

	if (!nvq->done_idx)
		return;

	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
	nvq->done_idx = 0;
}

static void vhost_tx_batch(struct vhost_net *net,
			   struct vhost_net_virtqueue *nvq,
			   struct socket *sock,
			   struct msghdr *msghdr)
{
	struct tun_msg_ctl ctl = {
		.type = TUN_MSG_PTR,
		.num = nvq->batched_xdp,
		.ptr = nvq->xdp,
	};
	int i, err;

	if (nvq->batched_xdp == 0)
		goto signal_used;

	msghdr->msg_control = &ctl;
	msghdr->msg_controllen = sizeof(ctl);
	err = sock->ops->sendmsg(sock, msghdr, 0);
	if (unlikely(err < 0)) {
		vq_err(&nvq->vq, "Fail to batch sending packets\n");

		/* free pages owned by XDP; since this is an unlikely error path,
		 * keep it simple and avoid more complex bulk update for the
		 * used pages
		 */
		for (i = 0; i < nvq->batched_xdp; ++i)
			put_page(virt_to_head_page(nvq->xdp[i].data));
		nvq->batched_xdp = 0;
		nvq->done_idx = 0;
		return;
	}

signal_used:
	vhost_net_signal_used(nvq);
	nvq->batched_xdp = 0;
}

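/* A TUN_MSG_PTR control message hands the whole nvq->xdp[] batch to the
 * backend in a single sendmsg(); only after that succeeds are the batched
 * heads signalled to the guest via vhost_net_signal_used().
 */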
static int sock_has_rx_data(struct socket *sock)
{
	if (unlikely(!sock))
		return 0;

	if (sock->ops->peek_len)
		return sock->ops->peek_len(sock);

	return skb_queue_empty(&sock->sk->sk_receive_queue);
}

static void vhost_net_busy_poll_try_queue(struct vhost_net *net,
					  struct vhost_virtqueue *vq)
{
	if (!vhost_vq_avail_empty(&net->dev, vq)) {
		vhost_poll_queue(&vq->poll);
	} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
		vhost_disable_notify(&net->dev, vq);
		vhost_poll_queue(&vq->poll);
	}
}

static void vhost_net_busy_poll(struct vhost_net *net,
				struct vhost_virtqueue *rvq,
				struct vhost_virtqueue *tvq,
				bool *busyloop_intr,
				bool poll_rx)
{
	unsigned long busyloop_timeout;
	unsigned long endtime;
	struct socket *sock;
	struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;

	/* Try to hold the vq mutex of the paired virtqueue. We can't
	 * use mutex_lock() here since we could not guarantee a
	 * consistent lock ordering.
	 */
	if (!mutex_trylock(&vq->mutex))
		return;

	vhost_disable_notify(&net->dev, vq);
	sock = vhost_vq_get_backend(rvq);

	busyloop_timeout = poll_rx ? rvq->busyloop_timeout :
				     tvq->busyloop_timeout;

	preempt_disable();
	endtime = busy_clock() + busyloop_timeout;

	while (vhost_can_busy_poll(endtime)) {
		if (vhost_has_work(&net->dev)) {
			*busyloop_intr = true;
			break;
		}

		if ((sock_has_rx_data(sock) &&
		     !vhost_vq_avail_empty(&net->dev, rvq)) ||
		    !vhost_vq_avail_empty(&net->dev, tvq))
			break;

		cpu_relax();
	}

	preempt_enable();

	if (poll_rx || sock_has_rx_data(sock))
		vhost_net_busy_poll_try_queue(net, vq);
	else if (!poll_rx) /* On tx here, sock has no rx data. */
		vhost_enable_notify(&net->dev, rvq);

	mutex_unlock(&vq->mutex);
}

static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
				    struct vhost_net_virtqueue *tnvq,
				    unsigned int *out_num, unsigned int *in_num,
				    struct msghdr *msghdr, bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;

	int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
				  out_num, in_num, NULL, NULL);

	if (r == tvq->num && tvq->busyloop_timeout) {
		/* Flush batched packets first */
		if (!vhost_sock_zcopy(vhost_vq_get_backend(tvq)))
			vhost_tx_batch(net, tnvq,
				       vhost_vq_get_backend(tvq),
				       msghdr);

		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);

		r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
				      out_num, in_num, NULL, NULL);
	}

	return r;
}

static bool vhost_exceeds_maxpend(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;

	return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV >
	       min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
}

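/* The modular expression above computes the number of zerocopy buffers
 * still in flight: (upend_idx - done_idx) mod UIO_MAXIOV. For example,
 * with vq->num == 256 the cap is min(128, 256 >> 2) == 64 pending buffers.
 */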
static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
			    size_t hdr_size, int out)
{
	/* Skip header. TODO: support TSO. */
	size_t len = iov_length(vq->iov, out);

	iov_iter_init(iter, ITER_SOURCE, vq->iov, out, len);
	iov_iter_advance(iter, hdr_size);

	return iov_iter_count(iter);
}

static int get_tx_bufs(struct vhost_net *net,
		       struct vhost_net_virtqueue *nvq,
		       struct msghdr *msg,
		       unsigned int *out, unsigned int *in,
		       size_t *len, bool *busyloop_intr)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	int ret;

	ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);

	if (ret < 0 || ret == vq->num)
		return ret;

	if (*in) {
		vq_err(vq, "Unexpected descriptor format for TX: out %d, in %d\n",
		       *out, *in);
		return -EFAULT;
	}

	/* Sanity check */
	*len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out);
	if (*len == 0) {
		vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n",
		       *len, nvq->vhost_hlen);
		return -EFAULT;
	}

	return ret;
}

static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
{
	return total_len < VHOST_NET_WEIGHT &&
	       !vhost_vq_avail_empty(vq->dev, vq);
}

static bool vhost_net_page_frag_refill(struct vhost_net *net, unsigned int sz,
				       struct page_frag *pfrag, gfp_t gfp)
{
	if (pfrag->page) {
		if (pfrag->offset + sz <= pfrag->size)
			return true;
		__page_frag_cache_drain(pfrag->page, net->refcnt_bias);
	}

	pfrag->offset = 0;
	net->refcnt_bias = 0;
	if (SKB_FRAG_PAGE_ORDER) {
		/* Avoid direct reclaim but allow kswapd to wake */
		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
					  __GFP_COMP | __GFP_NOWARN |
					  __GFP_NORETRY,
					  SKB_FRAG_PAGE_ORDER);
		if (likely(pfrag->page)) {
			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
			goto done;
		}
	}
	pfrag->page = alloc_page(gfp);
	if (likely(pfrag->page)) {
		pfrag->size = PAGE_SIZE;
		goto done;
	}
	return false;

done:
	net->refcnt_bias = USHRT_MAX;
	page_ref_add(pfrag->page, USHRT_MAX - 1);
	return true;
}

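/* Refcount-bias trick: the page reference count is bumped by USHRT_MAX - 1
 * up front, and refcnt_bias counts how many of those references are still
 * unclaimed. Each buffer carved out of the frag consumes one (see the
 * --net->refcnt_bias in vhost_net_build_xdp() below); the leftover bias is
 * returned in one go via __page_frag_cache_drain().
 */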
#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
			       struct iov_iter *from)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_net *net = container_of(vq->dev, struct vhost_net,
					     dev);
	struct socket *sock = vhost_vq_get_backend(vq);
	struct page_frag *alloc_frag = &net->page_frag;
	struct virtio_net_hdr *gso;
	struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
	struct tun_xdp_hdr *hdr;
	size_t len = iov_iter_count(from);
	int headroom = vhost_sock_xdp(sock) ? XDP_PACKET_HEADROOM : 0;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	int pad = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + nvq->sock_hlen);
	int sock_hlen = nvq->sock_hlen;
	void *buf;
	int copied;

	if (unlikely(len < nvq->sock_hlen))
		return -EFAULT;

	if (SKB_DATA_ALIGN(len + pad) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return -ENOSPC;

	buflen += SKB_DATA_ALIGN(len + pad);
	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
	if (unlikely(!vhost_net_page_frag_refill(net, buflen,
						 alloc_frag, GFP_KERNEL)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset +
				     offsetof(struct tun_xdp_hdr, gso),
				     sock_hlen, from);
	if (copied != sock_hlen)
		return -EFAULT;

	hdr = buf;
	gso = &hdr->gso;

	if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
	    vhost16_to_cpu(vq, gso->csum_start) +
	    vhost16_to_cpu(vq, gso->csum_offset) + 2 >
	    vhost16_to_cpu(vq, gso->hdr_len)) {
		gso->hdr_len = cpu_to_vhost16(vq,
					      vhost16_to_cpu(vq, gso->csum_start) +
					      vhost16_to_cpu(vq, gso->csum_offset) + 2);

		if (vhost16_to_cpu(vq, gso->hdr_len) > len)
			return -EINVAL;
	}

	len -= sock_hlen;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset + pad,
				     len, from);
	if (copied != len)
		return -EFAULT;

	xdp_init_buff(xdp, buflen, NULL);
	xdp_prepare_buff(xdp, buf, pad, len, true);
	hdr->buflen = buflen;

	--net->refcnt_bias;
	alloc_frag->offset += buflen;

	++nvq->batched_xdp;

	return 0;
}

768 | ||
0d20bdf3 | 769 | static void handle_tx_copy(struct vhost_net *net, struct socket *sock) |
3a4d5c94 | 770 | { |
2839400f | 771 | struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; |
81f95a55 | 772 | struct vhost_virtqueue *vq = &nvq->vq; |
98a527aa | 773 | unsigned out, in; |
d5675bd2 | 774 | int head; |
3a4d5c94 MT |
775 | struct msghdr msg = { |
776 | .msg_name = NULL, | |
777 | .msg_namelen = 0, | |
778 | .msg_control = NULL, | |
779 | .msg_controllen = 0, | |
3a4d5c94 MT |
780 | .msg_flags = MSG_DONTWAIT, |
781 | }; | |
782 | size_t len, total_len = 0; | |
70181d51 | 783 | int err; |
a2ac9990 | 784 | int sent_pkts = 0; |
0a0be13b | 785 | bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX); |
28457ee6 | 786 | |
e2412c07 | 787 | do { |
0d20bdf3 | 788 | bool busyloop_intr = false; |
3a4d5c94 | 789 | |
0a0be13b JW |
790 | if (nvq->done_idx == VHOST_NET_BATCH) |
791 | vhost_tx_batch(net, nvq, sock, &msg); | |
792 | ||
0d20bdf3 JW |
793 | head = get_tx_bufs(net, nvq, &msg, &out, &in, &len, |
794 | &busyloop_intr); | |
795 | /* On error, stop handling until the next kick. */ | |
796 | if (unlikely(head < 0)) | |
797 | break; | |
798 | /* Nothing new? Wait for eventfd to tell us they refilled. */ | |
799 | if (head == vq->num) { | |
800 | if (unlikely(busyloop_intr)) { | |
801 | vhost_poll_queue(&vq->poll); | |
802 | } else if (unlikely(vhost_enable_notify(&net->dev, | |
803 | vq))) { | |
804 | vhost_disable_notify(&net->dev, vq); | |
805 | continue; | |
806 | } | |
807 | break; | |
808 | } | |
6b1e6cc7 | 809 | |
0d20bdf3 | 810 | total_len += len; |
0a0be13b JW |
811 | |
812 | /* For simplicity, TX batching is only enabled if | |
813 | * sndbuf is unlimited. | |
814 | */ | |
815 | if (sock_can_batch) { | |
816 | err = vhost_net_build_xdp(nvq, &msg.msg_iter); | |
817 | if (!err) { | |
818 | goto done; | |
819 | } else if (unlikely(err != -ENOSPC)) { | |
820 | vhost_tx_batch(net, nvq, sock, &msg); | |
821 | vhost_discard_vq_desc(vq, 1); | |
822 | vhost_net_enable_vq(net, vq); | |
823 | break; | |
824 | } | |
825 | ||
826 | /* We can't build XDP buff, go for single | |
827 | * packet path but let's flush batched | |
828 | * packets. | |
829 | */ | |
830 | vhost_tx_batch(net, nvq, sock, &msg); | |
831 | msg.msg_control = NULL; | |
832 | } else { | |
833 | if (tx_can_batch(vq, total_len)) | |
834 | msg.msg_flags |= MSG_MORE; | |
835 | else | |
836 | msg.msg_flags &= ~MSG_MORE; | |
837 | } | |
0d20bdf3 | 838 | |
0d20bdf3 JW |
839 | err = sock->ops->sendmsg(sock, &msg, len); |
840 | if (unlikely(err < 0)) { | |
dc9c9e72 YW |
841 | if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) { |
842 | vhost_discard_vq_desc(vq, 1); | |
843 | vhost_net_enable_vq(net, vq); | |
844 | break; | |
845 | } | |
846 | pr_debug("Fail to send packet: err %d", err); | |
847 | } else if (unlikely(err != len)) | |
0d20bdf3 JW |
848 | pr_debug("Truncated TX packet: len %d != %zd\n", |
849 | err, len); | |
0a0be13b JW |
850 | done: |
851 | vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head); | |
852 | vq->heads[nvq->done_idx].len = 0; | |
853 | ++nvq->done_idx; | |
e2412c07 | 854 | } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len))); |
4afb52c2 | 855 | |
0a0be13b | 856 | vhost_tx_batch(net, nvq, sock, &msg); |
0d20bdf3 | 857 | } |
3a4d5c94 | 858 | |
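/* Whether a packet goes through the batched XDP path or a plain
 * sendmsg(), its head lands in vq->heads[] with len 0 and is signalled in
 * batches: either when done_idx reaches VHOST_NET_BATCH at the top of the
 * loop, or by the final vhost_tx_batch() when the loop exits.
 */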
static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct tun_msg_ctl ctl;
	size_t len, total_len = 0;
	int err;
	struct vhost_net_ubuf_ref *ubufs;
	struct ubuf_info_msgzc *ubuf;
	bool zcopy_used;
	int sent_pkts = 0;

	do {
		bool busyloop_intr;

		/* Release DMAs done buffers first */
		vhost_zerocopy_signal_used(net, vq);

		busyloop_intr = false;
		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
				   &busyloop_intr);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}

		zcopy_used = len >= VHOST_GOODCOPY_LEN
			     && !vhost_exceeds_maxpend(net)
			     && vhost_net_tx_select_zcopy(net);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			ubuf = nvq->ubuf_info + nvq->upend_idx;
			vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
			ubuf->ctx = nvq->ubufs;
			ubuf->desc = nvq->upend_idx;
			ubuf->ubuf.callback = vhost_zerocopy_callback;
			ubuf->ubuf.flags = SKBFL_ZEROCOPY_FRAG;
			refcount_set(&ubuf->ubuf.refcnt, 1);
			msg.msg_control = &ctl;
			ctl.type = TUN_MSG_UBUF;
			ctl.ptr = &ubuf->ubuf;
			msg.msg_controllen = sizeof(ctl);
			ubufs = nvq->ubufs;
			atomic_inc(&ubufs->refcount);
			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
		} else {
			msg.msg_control = NULL;
			ubufs = NULL;
		}
		total_len += len;
		if (tx_can_batch(vq, total_len) &&
		    likely(!vhost_exceeds_maxpend(net))) {
			msg.msg_flags |= MSG_MORE;
		} else {
			msg.msg_flags &= ~MSG_MORE;
		}

		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			if (zcopy_used) {
				if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
					vhost_net_ubuf_put(ubufs);
				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
					% UIO_MAXIOV;
			}
			if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
				vhost_discard_vq_desc(vq, 1);
				vhost_net_enable_vq(net, vq);
				break;
			}
			pr_debug("Fail to send packet: err %d", err);
		} else if (unlikely(err != len))
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		vhost_net_tx_packet(net);
	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
}

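/* Error unwind above: if sendmsg() fails for a zerocopy packet, upend_idx
 * is rolled back, and the ubuf reference is dropped only when the
 * completion callback has not already fired (i.e. the slot is still
 * VHOST_DMA_IN_PROGRESS).
 */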
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	struct socket *sock;

	mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
	sock = vhost_vq_get_backend(vq);
	if (!sock)
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	if (vhost_sock_zcopy(sock))
		handle_tx_zerocopy(net, sock);
	else
		handle_tx_copy(net, sock);

out:
	mutex_unlock(&vq->mutex);
}

static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	if (rvq->rx_ring)
		return vhost_net_buf_peek(rvq);

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (skb_vlan_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}

static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
				      bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;
	int len = peek_head_len(rnvq, sk);

	if (!len && rvq->busyloop_timeout) {
		/* Flush batched heads first */
		vhost_net_signal_used(rnvq);
		/* Both tx vq and rx socket were polled here */
		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);

		len = peek_head_len(rnvq, sk);
	}

	return len;
}

/* This is a multi-buffer version of vhost_get_vq_desc, that works if
 *	vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- log offset
 * @quota	- headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;
	/* len is always initialized before use since we are always called with
	 * datalen > 0.
	 */
	u32 len;

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		r = vhost_get_vq_desc(vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (unlikely(r < 0))
			goto err;

		d = r;
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
				"out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = cpu_to_vhost32(vq, d);
		len = iov_length(vq->iov + seg, in);
		heads[headcount].len = cpu_to_vhost32(vq, len);
		datalen -= len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;

	/* Detect overrun */
	if (unlikely(datalen > 0)) {
		r = UIO_MAXIOV + 1;
		goto err;
	}
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}

1107 | ||
3a4d5c94 MT |
1108 | /* Expects to be always run from workqueue - which acts as |
1109 | * read-size critical section for our kind of RCU. */ | |
94249369 | 1110 | static void handle_rx(struct vhost_net *net) |
3a4d5c94 | 1111 | { |
81f95a55 MT |
1112 | struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX]; |
1113 | struct vhost_virtqueue *vq = &nvq->vq; | |
3f649ab7 | 1114 | unsigned in, log; |
8dd014ad DS |
1115 | struct vhost_log *vq_log; |
1116 | struct msghdr msg = { | |
1117 | .msg_name = NULL, | |
1118 | .msg_namelen = 0, | |
1119 | .msg_control = NULL, /* FIXME: get and handle RX aux data. */ | |
1120 | .msg_controllen = 0, | |
8dd014ad DS |
1121 | .msg_flags = MSG_DONTWAIT, |
1122 | }; | |
0960b641 JW |
1123 | struct virtio_net_hdr hdr = { |
1124 | .flags = 0, | |
1125 | .gso_type = VIRTIO_NET_HDR_GSO_NONE | |
8dd014ad | 1126 | }; |
8dd014ad | 1127 | size_t total_len = 0; |
910a578f | 1128 | int err, mergeable; |
f5a4941a | 1129 | s16 headcount; |
8dd014ad DS |
1130 | size_t vhost_hlen, sock_hlen; |
1131 | size_t vhost_len, sock_len; | |
be294a51 | 1132 | bool busyloop_intr = false; |
2e26af79 | 1133 | struct socket *sock; |
ba7438ae | 1134 | struct iov_iter fixup; |
0960b641 | 1135 | __virtio16 num_buffers; |
db688c24 | 1136 | int recv_pkts = 0; |
8dd014ad | 1137 | |
a6a67a2f | 1138 | mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX); |
247643f8 | 1139 | sock = vhost_vq_get_backend(vq); |
2e26af79 AH |
1140 | if (!sock) |
1141 | goto out; | |
6b1e6cc7 | 1142 | |
9b5e830b | 1143 | if (!vq_meta_prefetch(vq)) |
6b1e6cc7 JW |
1144 | goto out; |
1145 | ||
8ea8cf89 | 1146 | vhost_disable_notify(&net->dev, vq); |
8241a1e4 | 1147 | vhost_net_disable_vq(net, vq); |
2e26af79 | 1148 | |
81f95a55 MT |
1149 | vhost_hlen = nvq->vhost_hlen; |
1150 | sock_hlen = nvq->sock_hlen; | |
8dd014ad | 1151 | |
ea16c514 | 1152 | vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ? |
8dd014ad | 1153 | vq->log : NULL; |
ea16c514 | 1154 | mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF); |
8dd014ad | 1155 | |
e2412c07 JW |
1156 | do { |
1157 | sock_len = vhost_net_rx_peek_head_len(net, sock->sk, | |
1158 | &busyloop_intr); | |
1159 | if (!sock_len) | |
1160 | break; | |
8dd014ad DS |
1161 | sock_len += sock_hlen; |
1162 | vhost_len = sock_len + vhost_hlen; | |
f5a4941a JW |
1163 | headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx, |
1164 | vhost_len, &in, vq_log, &log, | |
94249369 | 1165 | likely(mergeable) ? UIO_MAXIOV : 1); |
8dd014ad DS |
1166 | /* On error, stop handling until the next kick. */ |
1167 | if (unlikely(headcount < 0)) | |
8241a1e4 | 1168 | goto out; |
8dd014ad DS |
1169 | /* OK, now we need to know about added descriptors. */ |
1170 | if (!headcount) { | |
6369fec5 TM |
1171 | if (unlikely(busyloop_intr)) { |
1172 | vhost_poll_queue(&vq->poll); | |
1173 | } else if (unlikely(vhost_enable_notify(&net->dev, vq))) { | |
8dd014ad DS |
1174 | /* They have slipped one in as we were |
1175 | * doing that: check again. */ | |
8ea8cf89 | 1176 | vhost_disable_notify(&net->dev, vq); |
8dd014ad DS |
1177 | continue; |
1178 | } | |
1179 | /* Nothing new? Wait for eventfd to tell us | |
1180 | * they refilled. */ | |
8241a1e4 | 1181 | goto out; |
8dd014ad | 1182 | } |
6369fec5 | 1183 | busyloop_intr = false; |
5990a305 | 1184 | if (nvq->rx_ring) |
6e474083 WX |
1185 | msg.msg_control = vhost_net_buf_consume(&nvq->rxq); |
1186 | /* On overrun, truncate and discard */ | |
1187 | if (unlikely(headcount > UIO_MAXIOV)) { | |
de4eda9d | 1188 | iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, 1, 1); |
6e474083 WX |
1189 | err = sock->ops->recvmsg(sock, &msg, |
1190 | 1, MSG_DONTWAIT | MSG_TRUNC); | |
1191 | pr_debug("Discarded rx packet: len %zd\n", sock_len); | |
1192 | continue; | |
1193 | } | |
8dd014ad | 1194 | /* We don't need to be notified again. */ |
de4eda9d | 1195 | iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, in, vhost_len); |
ba7438ae AV |
1196 | fixup = msg.msg_iter; |
1197 | if (unlikely((vhost_hlen))) { | |
1198 | /* We will supply the header ourselves | |
1199 | * TODO: support TSO. | |
1200 | */ | |
1201 | iov_iter_advance(&msg.msg_iter, vhost_hlen); | |
ba7438ae | 1202 | } |
1b784140 | 1203 | err = sock->ops->recvmsg(sock, &msg, |
8dd014ad DS |
1204 | sock_len, MSG_DONTWAIT | MSG_TRUNC); |
1205 | /* Userspace might have consumed the packet meanwhile: | |
1206 | * it's not supposed to do this usually, but might be hard | |
1207 | * to prevent. Discard data we got (if any) and keep going. */ | |
1208 | if (unlikely(err != sock_len)) { | |
1209 | pr_debug("Discarded rx packet: " | |
1210 | " len %d, expected %zd\n", err, sock_len); | |
1211 | vhost_discard_vq_desc(vq, headcount); | |
1212 | continue; | |
1213 | } | |
ba7438ae | 1214 | /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */ |
4c5a8442 MT |
1215 | if (unlikely(vhost_hlen)) { |
1216 | if (copy_to_iter(&hdr, sizeof(hdr), | |
1217 | &fixup) != sizeof(hdr)) { | |
1218 | vq_err(vq, "Unable to write vnet_hdr " | |
1219 | "at addr %p\n", vq->iov->iov_base); | |
8241a1e4 | 1220 | goto out; |
4c5a8442 MT |
1221 | } |
1222 | } else { | |
1223 | /* Header came from socket; we'll need to patch | |
1224 | * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF | |
1225 | */ | |
1226 | iov_iter_advance(&fixup, sizeof(hdr)); | |
8dd014ad DS |
1227 | } |
1228 | /* TODO: Should check and handle checksum. */ | |
5201aa49 | 1229 | |
0960b641 | 1230 | num_buffers = cpu_to_vhost16(vq, headcount); |
cfbdab95 | 1231 | if (likely(mergeable) && |
0d79a493 MT |
1232 | copy_to_iter(&num_buffers, sizeof num_buffers, |
1233 | &fixup) != sizeof num_buffers) { | |
8dd014ad DS |
1234 | vq_err(vq, "Failed num_buffers write"); |
1235 | vhost_discard_vq_desc(vq, headcount); | |
8241a1e4 | 1236 | goto out; |
8dd014ad | 1237 | } |
f5a4941a | 1238 | nvq->done_idx += headcount; |
d0d86971 | 1239 | if (nvq->done_idx > VHOST_NET_BATCH) |
09c32489 | 1240 | vhost_net_signal_used(nvq); |
8dd014ad | 1241 | if (unlikely(vq_log)) |
cc5e7107 JW |
1242 | vhost_log_write(vq, vq_log, log, vhost_len, |
1243 | vq->iov, in); | |
8dd014ad | 1244 | total_len += vhost_len; |
e2412c07 JW |
1245 | } while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len))); |
1246 | ||
be294a51 TM |
1247 | if (unlikely(busyloop_intr)) |
1248 | vhost_poll_queue(&vq->poll); | |
e2412c07 | 1249 | else if (!sock_len) |
be294a51 | 1250 | vhost_net_enable_vq(net, vq); |
2e26af79 | 1251 | out: |
09c32489 | 1252 | vhost_net_signal_used(nvq); |
8dd014ad | 1253 | mutex_unlock(&vq->mutex); |
8dd014ad DS |
1254 | } |
1255 | ||
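/* fixup is a snapshot of msg_iter taken before recvmsg() consumes it: it
 * lets handle_rx() go back and patch the vnet header (when vhost supplies
 * it) or just the num_buffers field (when the header came from the socket)
 * after the packet body has already been received into the guest buffers.
 */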
static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}

static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	void **queue;
	struct xdp_buff *xdp;
	int i;

	n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!n)
		return -ENOMEM;
	vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kvfree(n);
		return -ENOMEM;
	}

	queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
			      GFP_KERNEL);
	if (!queue) {
		kfree(vqs);
		kvfree(n);
		return -ENOMEM;
	}
	n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;

	xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL);
	if (!xdp) {
		kfree(vqs);
		kvfree(n);
		kfree(queue);
		return -ENOMEM;
	}
	n->vqs[VHOST_NET_VQ_TX].xdp = xdp;

	dev = &n->dev;
	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].ubufs = NULL;
		n->vqs[i].ubuf_info = NULL;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].done_idx = 0;
		n->vqs[i].batched_xdp = 0;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		n->vqs[i].rx_ring = NULL;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
		       UIO_MAXIOV + VHOST_NET_BATCH,
		       VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, true,
		       NULL);

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);

	f->private_data = n;
	n->page_frag.page = NULL;
	n->refcnt_bias = 0;

	return 0;
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);

	mutex_lock(&vq->mutex);
	sock = vhost_vq_get_backend(vq);
	vhost_net_disable_vq(n, vq);
	vhost_vq_set_backend(vq, NULL);
	vhost_net_buf_unproduce(nvq);
	nvq->rx_ring = NULL;
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}

static void vhost_net_flush(struct vhost_net *n)
{
	vhost_dev_flush(&n->dev);
	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = true;
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		/* Wait for all lower device DMAs done. */
		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = false;
		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
	}
}

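/* Flush protocol: setting tx_flush makes vhost_net_tx_select_zcopy()
 * refuse new zerocopy DMAs, put_and_wait() then drains the outstanding
 * ones, and the refcount is reset to 1 (the "no outstanding ubufs" state)
 * before zerocopy is re-enabled.
 */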
static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev);
	vhost_net_vq_reset(n);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	/* Make sure no callbacks are outstanding */
	synchronize_rcu();
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
	kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
	kfree(n->dev.vqs);
	if (n->page_frag.page)
		__page_frag_cache_drain(n->page_frag.page, n->refcnt_bias);
	kvfree(n);
	return 0;
}

static struct socket *get_raw_socket(int fd)
{
	int r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	if (sock->sk->sk_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	sockfd_put(sock);
	return ERR_PTR(r);
}

static struct ptr_ring *get_tap_ptr_ring(struct file *file)
{
	struct ptr_ring *ring;
	ring = tun_get_tx_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = tap_get_ptr_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = NULL;
out:
	return ring;
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = tap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}

1494 | static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) | |
1495 | { | |
1496 | struct socket *sock, *oldsock; | |
1497 | struct vhost_virtqueue *vq; | |
2839400f | 1498 | struct vhost_net_virtqueue *nvq; |
fe729a57 | 1499 | struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL; |
3a4d5c94 MT |
1500 | int r; |
1501 | ||
1502 | mutex_lock(&n->dev.mutex); | |
1503 | r = vhost_dev_check_owner(&n->dev); | |
1504 | if (r) | |
1505 | goto err; | |
1506 | ||
1507 | if (index >= VHOST_NET_VQ_MAX) { | |
1508 | r = -ENOBUFS; | |
1509 | goto err; | |
1510 | } | |
3ab2e420 | 1511 | vq = &n->vqs[index].vq; |
2839400f | 1512 | nvq = &n->vqs[index]; |
3a4d5c94 MT |
1513 | mutex_lock(&vq->mutex); |
1514 | ||
9526f9a2 EA |
1515 | if (fd == -1) |
1516 | vhost_clear_msg(&n->dev); | |
1517 | ||
3a4d5c94 MT |
1518 | /* Verify that ring has been setup correctly. */ |
1519 | if (!vhost_vq_access_ok(vq)) { | |
1520 | r = -EFAULT; | |
1dace8c8 | 1521 | goto err_vq; |
3a4d5c94 MT |
1522 | } |
1523 | sock = get_socket(fd); | |
1524 | if (IS_ERR(sock)) { | |
1525 | r = PTR_ERR(sock); | |
1dace8c8 | 1526 | goto err_vq; |
3a4d5c94 MT |
1527 | } |
1528 | ||
1529 | /* start polling new socket */ | |
247643f8 | 1530 | oldsock = vhost_vq_get_backend(vq); |
11fe8839 | 1531 | if (sock != oldsock) { |
fe729a57 AH |
1532 | ubufs = vhost_net_ubuf_alloc(vq, |
1533 | sock && vhost_sock_zcopy(sock)); | |
bab632d6 MT |
1534 | if (IS_ERR(ubufs)) { |
1535 | r = PTR_ERR(ubufs); | |
1536 | goto err_ubufs; | |
1537 | } | |
692a998b | 1538 | |
d47effe1 | 1539 | vhost_net_disable_vq(n, vq); |
247643f8 | 1540 | vhost_vq_set_backend(vq, sock); |
c67df11f | 1541 | vhost_net_buf_unproduce(nvq); |
80f7d030 | 1542 | r = vhost_vq_init_access(vq); |
f59281da | 1543 | if (r) |
692a998b | 1544 | goto err_used; |
2b8b328b JW |
1545 | r = vhost_net_enable_vq(n, vq); |
1546 | if (r) | |
1547 | goto err_used; | |
fb4554c2 AV |
1548 | if (index == VHOST_NET_VQ_RX) { |
1549 | if (sock) | |
1550 | nvq->rx_ring = get_tap_ptr_ring(sock->file); | |
1551 | else | |
1552 | nvq->rx_ring = NULL; | |
1553 | } | |
692a998b | 1554 | |
2839400f AH |
1555 | oldubufs = nvq->ubufs; |
1556 | nvq->ubufs = ubufs; | |
64e9a9b8 MT |
1557 | |
1558 | n->tx_packets = 0; | |
1559 | n->tx_zcopy_err = 0; | |
1280c27f | 1560 | n->tx_flush = false; |
dd1f4078 | 1561 | } |
3a4d5c94 | 1562 | |
1680e906 MT |
1563 | mutex_unlock(&vq->mutex); |
1564 | ||
c047e5f3 | 1565 | if (oldubufs) { |
c38e39c3 | 1566 | vhost_net_ubuf_put_wait_and_free(oldubufs); |
c047e5f3 | 1567 | mutex_lock(&vq->mutex); |
eaae8132 | 1568 | vhost_zerocopy_signal_used(n, vq); |
c047e5f3 MT |
1569 | mutex_unlock(&vq->mutex); |
1570 | } | |
bab632d6 | 1571 | |
3a4d5c94 | 1572 | if (oldsock) { |
b2ffa407 | 1573 | vhost_dev_flush(&n->dev); |
09aaacf0 | 1574 | sockfd_put(oldsock); |
3a4d5c94 | 1575 | } |
1dace8c8 | 1576 | |
1680e906 MT |
1577 | mutex_unlock(&n->dev.mutex); |
1578 | return 0; | |
1579 | ||
692a998b | 1580 | err_used: |
247643f8 | 1581 | vhost_vq_set_backend(vq, oldsock); |
692a998b JW |
1582 | vhost_net_enable_vq(n, vq); |
1583 | if (ubufs) | |
c38e39c3 | 1584 | vhost_net_ubuf_put_wait_and_free(ubufs); |
bab632d6 | 1585 | err_ubufs: |
b8f1f658 JW |
1586 | if (sock) |
1587 | sockfd_put(sock); | |
1dace8c8 JD |
1588 | err_vq: |
1589 | mutex_unlock(&vq->mutex); | |
3a4d5c94 MT |
1590 | err: |
1591 | mutex_unlock(&n->dev.mutex); | |
1592 | return r; | |
1593 | } | |
1594 | ||
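/*
 * Sketch (illustrative): how userspace drives vhost_net_set_backend().
 * index selects the RX (0) or TX (1) virtqueue and fd == -1 detaches
 * the current backend.  The ring must already be configured via the
 * generic VHOST_SET_VRING_* ioctls, or the vhost_vq_access_ok() check
 * above fails with -EFAULT.
 */
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int attach_backend(int vhost_fd, unsigned int index, int tap_fd)
{
	struct vhost_vring_file file = {
		.index = index,	/* VHOST_NET_VQ_RX or VHOST_NET_VQ_TX */
		.fd    = tap_fd,	/* -1 disables the backend */
	};

	return ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &file);
}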
1595 | static long vhost_net_reset_owner(struct vhost_net *n) | |
1596 | { | |
1597 | struct socket *tx_sock = NULL; | |
1598 | struct socket *rx_sock = NULL; | |
1599 | long err; | |
0bbe3066 | 1600 | struct vhost_iotlb *umem; |
d47effe1 | 1601 | |
3a4d5c94 MT |
1602 | mutex_lock(&n->dev.mutex); |
1603 | err = vhost_dev_check_owner(&n->dev); | |
1604 | if (err) | |
1605 | goto done; | |
a9709d68 JW |
1606 | umem = vhost_dev_reset_owner_prepare(); |
1607 | if (!umem) { | |
150b9e51 MT |
1608 | err = -ENOMEM; |
1609 | goto done; | |
1610 | } | |
3a4d5c94 MT |
1611 | vhost_net_stop(n, &tx_sock, &rx_sock); |
1612 | vhost_net_flush(n); | |
4cd87951 | 1613 | vhost_dev_stop(&n->dev); |
a9709d68 | 1614 | vhost_dev_reset_owner(&n->dev, umem); |
81f95a55 | 1615 | vhost_net_vq_reset(n); |
3a4d5c94 MT |
1616 | done: |
1617 | mutex_unlock(&n->dev.mutex); | |
1618 | if (tx_sock) | |
09aaacf0 | 1619 | sockfd_put(tx_sock); |
3a4d5c94 | 1620 | if (rx_sock) |
09aaacf0 | 1621 | sockfd_put(rx_sock); |
3a4d5c94 MT |
1622 | return err; |
1623 | } | |
1624 | ||
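/*
 * Sketch (illustrative): VHOST_RESET_OWNER takes no argument.  It drops
 * both backend sockets and returns the device to a pristine state so a
 * new owner can issue VHOST_SET_OWNER, e.g. when a VMM re-execs itself.
 */
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int reset_device(int vhost_fd)
{
	return ioctl(vhost_fd, VHOST_RESET_OWNER, NULL);
}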
1625 | static int vhost_net_set_features(struct vhost_net *n, u64 features) | |
1626 | { | |
8dd014ad | 1627 | size_t vhost_hlen, sock_hlen, hdr_len; |
3a4d5c94 | 1628 | int i; |
8dd014ad | 1629 | |
e4fca7d6 MT |
1630 | hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | |
1631 | (1ULL << VIRTIO_F_VERSION_1))) ? | |
8dd014ad DS |
1632 | sizeof(struct virtio_net_hdr_mrg_rxbuf) : |
1633 | sizeof(struct virtio_net_hdr); | |
1634 | if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) { | |
1635 | /* vhost provides vnet_hdr */ | |
1636 | vhost_hlen = hdr_len; | |
1637 | sock_hlen = 0; | |
1638 | } else { | |
1639 | /* socket provides vnet_hdr */ | |
1640 | vhost_hlen = 0; | |
1641 | sock_hlen = hdr_len; | |
1642 | } | |
3a4d5c94 MT |
1643 | mutex_lock(&n->dev.mutex); |
1644 | if ((features & (1 << VHOST_F_LOG_ALL)) && | |
6b1e6cc7 JW |
1645 | !vhost_log_access_ok(&n->dev)) |
1646 | goto out_unlock; | |
1647 | ||
321bd212 | 1648 | if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) { |
759aba1e | 1649 | if (vhost_init_device_iotlb(&n->dev)) |
6b1e6cc7 | 1650 | goto out_unlock; |
3a4d5c94 | 1651 | } |
6b1e6cc7 | 1652 | |
3a4d5c94 | 1653 | for (i = 0; i < VHOST_NET_VQ_MAX; ++i) { |
3ab2e420 | 1654 | mutex_lock(&n->vqs[i].vq.mutex); |
ea16c514 | 1655 | n->vqs[i].vq.acked_features = features; |
81f95a55 MT |
1656 | n->vqs[i].vhost_hlen = vhost_hlen; |
1657 | n->vqs[i].sock_hlen = sock_hlen; | |
3ab2e420 | 1658 | mutex_unlock(&n->vqs[i].vq.mutex); |
3a4d5c94 | 1659 | } |
3a4d5c94 MT |
1660 | mutex_unlock(&n->dev.mutex); |
1661 | return 0; | |
6b1e6cc7 JW |
1662 | |
1663 | out_unlock: | |
1664 | mutex_unlock(&n->dev.mutex); | |
1665 | return -EFAULT; | |
3a4d5c94 MT |
1666 | } |
1667 | ||
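/*
 * Sketch (illustrative): typical feature negotiation against
 * vhost_net_set_features().  The header layout follows from the mask:
 * VIRTIO_NET_F_MRG_RXBUF or VIRTIO_F_VERSION_1 selects the 12-byte
 * virtio_net_hdr_mrg_rxbuf over the 10-byte virtio_net_hdr, and
 * VHOST_NET_F_VIRTIO_NET_HDR decides whether vhost (vhost_hlen) or the
 * backend socket (sock_hlen) supplies that header.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int negotiate_features(int vhost_fd, uint64_t wanted)
{
	uint64_t features;

	if (ioctl(vhost_fd, VHOST_GET_FEATURES, &features) < 0)
		return -1;
	features &= wanted;	/* e.g. 1ULL << VIRTIO_NET_F_MRG_RXBUF */
	return ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
}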
b1ad8496 AH |
1668 | static long vhost_net_set_owner(struct vhost_net *n) |
1669 | { | |
1670 | int r; | |
1671 | ||
1672 | mutex_lock(&n->dev.mutex); | |
05c05351 MT |
1673 | if (vhost_dev_has_owner(&n->dev)) { |
1674 | r = -EBUSY; | |
1675 | goto out; | |
1676 | } | |
b1ad8496 AH |
1677 | r = vhost_net_set_ubuf_info(n); |
1678 | if (r) | |
1679 | goto out; | |
1680 | r = vhost_dev_set_owner(&n->dev); | |
1681 | if (r) | |
1682 | vhost_net_clear_ubuf_info(n); | |
1683 | vhost_net_flush(n); | |
1684 | out: | |
1685 | mutex_unlock(&n->dev.mutex); | |
1686 | return r; | |
1687 | } | |
1688 | ||
3a4d5c94 MT |
1689 | static long vhost_net_ioctl(struct file *f, unsigned int ioctl, |
1690 | unsigned long arg) | |
1691 | { | |
1692 | struct vhost_net *n = f->private_data; | |
1693 | void __user *argp = (void __user *)arg; | |
1694 | u64 __user *featurep = argp; | |
1695 | struct vhost_vring_file backend; | |
1696 | u64 features; | |
1697 | int r; | |
d47effe1 | 1698 | |
3a4d5c94 MT |
1699 | switch (ioctl) { |
1700 | case VHOST_NET_SET_BACKEND: | |
d3553a52 TY |
1701 | if (copy_from_user(&backend, argp, sizeof backend)) |
1702 | return -EFAULT; | |
3a4d5c94 MT |
1703 | return vhost_net_set_backend(n, backend.index, backend.fd); |
1704 | case VHOST_GET_FEATURES: | |
0dd05a3b | 1705 | features = VHOST_NET_FEATURES; |
d3553a52 TY |
1706 | if (copy_to_user(featurep, &features, sizeof features)) |
1707 | return -EFAULT; | |
1708 | return 0; | |
3a4d5c94 | 1709 | case VHOST_SET_FEATURES: |
d3553a52 TY |
1710 | if (copy_from_user(&features, featurep, sizeof features)) |
1711 | return -EFAULT; | |
0dd05a3b | 1712 | if (features & ~VHOST_NET_FEATURES) |
3a4d5c94 MT |
1713 | return -EOPNOTSUPP; |
1714 | return vhost_net_set_features(n, features); | |
429711ae JW |
1715 | case VHOST_GET_BACKEND_FEATURES: |
1716 | features = VHOST_NET_BACKEND_FEATURES; | |
1717 | if (copy_to_user(featurep, &features, sizeof(features))) | |
1718 | return -EFAULT; | |
1719 | return 0; | |
1720 | case VHOST_SET_BACKEND_FEATURES: | |
1721 | if (copy_from_user(&features, featurep, sizeof(features))) | |
1722 | return -EFAULT; | |
1723 | if (features & ~VHOST_NET_BACKEND_FEATURES) | |
1724 | return -EOPNOTSUPP; | |
460f7ce1 JW |
1725 | vhost_set_backend_features(&n->dev, features); |
1726 | return 0; | |
3a4d5c94 MT |
1727 | case VHOST_RESET_OWNER: |
1728 | return vhost_net_reset_owner(n); | |
b1ad8496 AH |
1729 | case VHOST_SET_OWNER: |
1730 | return vhost_net_set_owner(n); | |
3a4d5c94 MT |
1731 | default: |
1732 | mutex_lock(&n->dev.mutex); | |
935cdee7 MT |
1733 | r = vhost_dev_ioctl(&n->dev, ioctl, argp); |
1734 | if (r == -ENOIOCTLCMD) | |
1735 | r = vhost_vring_ioctl(&n->dev, ioctl, argp); | |
1736 | else | |
1737 | vhost_net_flush(n); | |
3a4d5c94 MT |
1738 | mutex_unlock(&n->dev.mutex); |
1739 | return r; | |
1740 | } | |
1741 | } | |
1742 | ||
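/*
 * Sketch (illustrative): the minimal lifecycle a user of this ioctl
 * surface goes through.  VHOST_SET_OWNER must come first on a fresh fd
 * (vhost_net_set_owner() returns -EBUSY on a second attempt); the
 * per-ring VHOST_SET_VRING_NUM/ADDR/BASE/KICK/CALL setup that must
 * precede VHOST_NET_SET_BACKEND is elided here.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vhost.h>

static int vhost_net_bringup(int tap_fd)
{
	int vhost_fd = open("/dev/vhost-net", O_RDWR);

	if (vhost_fd < 0)
		return -1;
	if (ioctl(vhost_fd, VHOST_SET_OWNER, NULL) < 0)
		goto err;
	/* ... VHOST_SET_FEATURES and VHOST_SET_VRING_* setup elided ... */
	for (unsigned int i = 0; i < 2; i++) {	/* VQ_RX = 0, VQ_TX = 1 */
		struct vhost_vring_file file = { .index = i, .fd = tap_fd };

		if (ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &file) < 0)
			goto err;
	}
	return vhost_fd;
err:
	close(vhost_fd);
	return -1;
}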
6b1e6cc7 JW |
1743 | static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) |
1744 | { | |
1745 | struct file *file = iocb->ki_filp; | |
1746 | struct vhost_net *n = file->private_data; | |
1747 | struct vhost_dev *dev = &n->dev; | |
1748 | int noblock = file->f_flags & O_NONBLOCK; | |
1749 | ||
1750 | return vhost_chr_read_iter(dev, to, noblock); | |
1751 | } | |
1752 | ||
1753 | static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb, | |
1754 | struct iov_iter *from) | |
1755 | { | |
1756 | struct file *file = iocb->ki_filp; | |
1757 | struct vhost_net *n = file->private_data; | |
1758 | struct vhost_dev *dev = &n->dev; | |
1759 | ||
1760 | return vhost_chr_write_iter(dev, from); | |
1761 | } | |
1762 | ||
afc9a42b | 1763 | static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait) |
6b1e6cc7 JW |
1764 | { |
1765 | struct vhost_net *n = file->private_data; | |
1766 | struct vhost_dev *dev = &n->dev; | |
1767 | ||
1768 | return vhost_chr_poll(file, dev, wait); | |
1769 | } | |
1770 | ||
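/*
 * Sketch (illustrative): these read/write/poll hooks carry the device
 * IOTLB miss/update channel used when VIRTIO_F_ACCESS_PLATFORM is
 * negotiated.  This assumes VHOST_BACKEND_F_IOTLB_MSG_V2 was enabled
 * via VHOST_SET_BACKEND_FEATURES, so messages use vhost_msg_v2;
 * translate_iova() is a hypothetical stand-in for the VMM's own
 * guest-IOVA-to-process-address translation.
 */
#include <unistd.h>
#include <linux/vhost.h>

/* hypothetical VMM-side helper: guest IOVA -> process virtual address */
extern __u64 translate_iova(__u64 iova);

static int service_one_iotlb_miss(int vhost_fd)
{
	struct vhost_msg_v2 msg;

	if (read(vhost_fd, &msg, sizeof(msg)) != sizeof(msg))
		return -1;
	if (msg.type != VHOST_IOTLB_MSG_V2 ||
	    msg.iotlb.type != VHOST_IOTLB_MISS)
		return -1;
	msg.iotlb.type = VHOST_IOTLB_UPDATE;
	msg.iotlb.uaddr = translate_iova(msg.iotlb.iova);
	msg.iotlb.size = 4096;	/* example: map a single page */
	/* msg.iotlb.perm: the access the miss asked for is granted back */
	return write(vhost_fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}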
373a83a6 | 1771 | static const struct file_operations vhost_net_fops = { |
3a4d5c94 MT |
1772 | .owner = THIS_MODULE, |
1773 | .release = vhost_net_release, | |
6b1e6cc7 JW |
1774 | .read_iter = vhost_net_chr_read_iter, |
1775 | .write_iter = vhost_net_chr_write_iter, | |
1776 | .poll = vhost_net_chr_poll, | |
3a4d5c94 | 1777 | .unlocked_ioctl = vhost_net_ioctl, |
407e9ef7 | 1778 | .compat_ioctl = compat_ptr_ioctl, |
3a4d5c94 | 1779 | .open = vhost_net_open, |
6038f373 | 1780 | .llseek = noop_llseek, |
3a4d5c94 MT |
1781 | }; |
1782 | ||
1783 | static struct miscdevice vhost_net_misc = { | |
7c7c7f01 | 1784 | .minor = VHOST_NET_MINOR, |
1785 | .name = "vhost-net", | |
1786 | .fops = &vhost_net_fops, | |
3a4d5c94 MT |
1787 | }; |
1788 | ||
078adb3b | 1789 | static int __init vhost_net_init(void) |
3a4d5c94 | 1790 | { |
bab632d6 | 1791 | if (experimental_zcopytx) |
fe729a57 | 1792 | vhost_net_enable_zcopy(VHOST_NET_VQ_TX); |
c23f3445 | 1793 | return misc_register(&vhost_net_misc); |
3a4d5c94 MT |
1794 | } |
1795 | module_init(vhost_net_init); | |
1796 | ||
078adb3b | 1797 | static void __exit vhost_net_exit(void) |
3a4d5c94 MT |
1798 | { |
1799 | misc_deregister(&vhost_net_misc); | |
3a4d5c94 MT |
1800 | } |
1801 | module_exit(vhost_net_exit); | |
1802 | ||
1803 | MODULE_VERSION("0.0.1"); | |
1804 | MODULE_LICENSE("GPL v2"); | |
1805 | MODULE_AUTHOR("Michael S. Tsirkin"); | |
1806 | MODULE_DESCRIPTION("Host kernel accelerator for virtio net"); | |
7c7c7f01 | 1807 | MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR); |
1808 | MODULE_ALIAS("devname:vhost-net"); |