drivers/net/virtio_net.c
1ccea77e 1// SPDX-License-Identifier: GPL-2.0-or-later
48925e37 2/* A network driver using virtio.
296f96fc
RR
3 *
4 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
296f96fc
RR
5 */
6//#define DEBUG
7#include <linux/netdevice.h>
8#include <linux/etherdevice.h>
a9ea3fc6 9#include <linux/ethtool.h>
296f96fc
RR
10#include <linux/module.h>
11#include <linux/virtio.h>
12#include <linux/virtio_net.h>
f600b690 13#include <linux/bpf.h>
a67edbf4 14#include <linux/bpf_trace.h>
296f96fc 15#include <linux/scatterlist.h>
e918085a 16#include <linux/if_vlan.h>
5a0e3ad6 17#include <linux/slab.h>
8de4b2f3 18#include <linux/cpu.h>
ab7db917 19#include <linux/average.h>
186b3c99 20#include <linux/filter.h>
2ca653d6 21#include <linux/kernel.h>
d85b758f 22#include <net/route.h>
754b8a21 23#include <net/xdp.h>
ba5e4426 24#include <net/net_failover.h>
296f96fc 25
d34710e3 26static int napi_weight = NAPI_POLL_WEIGHT;
6c0cd7c0
DL
27module_param(napi_weight, int, 0444);
28
31c03aef 29static bool csum = true, gso = true, napi_tx = true;
34a48579
RR
30module_param(csum, bool, 0444);
31module_param(gso, bool, 0444);
b92f1e67 32module_param(napi_tx, bool, 0644);
34a48579 33
296f96fc 34/* FIXME: MTU in config. */
5061de36 35#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
3f2c31d9 36#define GOOD_COPY_LEN 128
296f96fc 37
f6b10209
JW
38#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
39
2de2f7f4
JF
40/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
41#define VIRTIO_XDP_HEADROOM 256
42
2471c75e
JDB
43/* Separating two types of XDP xmit */
44#define VIRTIO_XDP_TX BIT(0)
45#define VIRTIO_XDP_REDIR BIT(1)
46
5050471d
TM
47#define VIRTIO_XDP_FLAG BIT(0)
48
5377d758
JB
49/* RX packet size EWMA. The average packet size is used to determine the packet
50 * buffer size when refilling RX rings. As the entire RX ring may be refilled
51 * at once, the weight is chosen so that the EWMA will be insensitive to short-
52 * term, transient changes in packet size.
ab7db917 53 */
eb1e011a 54DECLARE_EWMA(pkt_len, 0, 64)
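/*
 * Sketch of what DECLARE_EWMA(pkt_len, 0, 64) provides (the semantics of
 * <linux/average.h>, not its exact implementation): ewma_pkt_len_add()
 * folds each sample in with a weight of 1/64 at integer precision,
 * roughly
 *
 *	avg = avg - avg / 64 + sample / 64;
 *
 * so a short burst of unusually small or large packets barely moves the
 * average that get_mergeable_buf_len() (further down) uses to size the
 * refill buffers.
 */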
ab7db917 55
66846048 56#define VIRTNET_DRIVER_VERSION "1.0.0"
2a41f71d 57
7acd4329
CIK
58static const unsigned long guest_offloads[] = {
59 VIRTIO_NET_F_GUEST_TSO4,
60 VIRTIO_NET_F_GUEST_TSO6,
61 VIRTIO_NET_F_GUEST_ECN,
e59ff2c4
JW
62 VIRTIO_NET_F_GUEST_UFO,
63 VIRTIO_NET_F_GUEST_CSUM
7acd4329 64};
3f93522f 65
1a03b8a3
TZ
66#define GUEST_OFFLOAD_LRO_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
67 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
68 (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
69 (1ULL << VIRTIO_NET_F_GUEST_UFO))
70
d7dfc5cf
TM
71struct virtnet_stat_desc {
72 char desc[ETH_GSTRING_LEN];
73 size_t offset;
3fa2a1df 74};
75
d7dfc5cf
TM
76struct virtnet_sq_stats {
77 struct u64_stats_sync syncp;
78 u64 packets;
79 u64 bytes;
5b8f3c8d
TM
80 u64 xdp_tx;
81 u64 xdp_tx_drops;
461f03dc 82 u64 kicks;
d7dfc5cf
TM
83};
84
d46eeeaf
JW
85struct virtnet_rq_stats {
86 struct u64_stats_sync syncp;
d7dfc5cf
TM
87 u64 packets;
88 u64 bytes;
2c4a2f7d 89 u64 drops;
5b8f3c8d
TM
90 u64 xdp_packets;
91 u64 xdp_tx;
92 u64 xdp_redirects;
93 u64 xdp_drops;
461f03dc 94 u64 kicks;
d7dfc5cf
TM
95};
96
97#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
d46eeeaf 98#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m)
d7dfc5cf
TM
99
100static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
5b8f3c8d
TM
101 { "packets", VIRTNET_SQ_STAT(packets) },
102 { "bytes", VIRTNET_SQ_STAT(bytes) },
103 { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) },
104 { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) },
461f03dc 105 { "kicks", VIRTNET_SQ_STAT(kicks) },
d7dfc5cf
TM
106};
107
108static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
5b8f3c8d
TM
109 { "packets", VIRTNET_RQ_STAT(packets) },
110 { "bytes", VIRTNET_RQ_STAT(bytes) },
111 { "drops", VIRTNET_RQ_STAT(drops) },
112 { "xdp_packets", VIRTNET_RQ_STAT(xdp_packets) },
113 { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) },
114 { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) },
115 { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) },
461f03dc 116 { "kicks", VIRTNET_RQ_STAT(kicks) },
d7dfc5cf
TM
117};
118
119#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc)
120#define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc)
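/*
 * The descriptor tables above pair an ethtool string with the field's byte
 * offset inside the per-queue stats struct, so generic code can walk the
 * fields without naming them.  Illustrative sketch only (the same pattern
 * virtnet_receive() uses below to fold a local struct into rq->stats;
 * "data" is a hypothetical output array for something like ethtool -S):
 *
 *	for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
 *		size_t offset = virtnet_rq_stats_desc[i].offset;
 *
 *		data[i] = *(u64 *)((u8 *)&rq->stats + offset);
 *	}
 */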
121
e9d7417b
JW
122/* Internal representation of a send virtqueue */
123struct send_queue {
 124 /* Virtqueue associated with this send_queue */
125 struct virtqueue *vq;
126
127 /* TX: fragments + linear part + virtio header */
128 struct scatterlist sg[MAX_SKB_FRAGS + 2];
986a4f4d
JW
129
130 /* Name of the send queue: output.$index */
131 char name[40];
b92f1e67 132
d7dfc5cf
TM
133 struct virtnet_sq_stats stats;
134
b92f1e67 135 struct napi_struct napi;
e9d7417b
JW
136};
137
138/* Internal representation of a receive virtqueue */
139struct receive_queue {
140 /* Virtqueue associated with this receive_queue */
141 struct virtqueue *vq;
142
296f96fc
RR
143 struct napi_struct napi;
144
f600b690
JF
145 struct bpf_prog __rcu *xdp_prog;
146
d7dfc5cf
TM
147 struct virtnet_rq_stats stats;
148
e9d7417b
JW
149 /* Chain pages by the private ptr. */
150 struct page *pages;
151
ab7db917 152 /* Average packet length for mergeable receive buffers. */
5377d758 153 struct ewma_pkt_len mrg_avg_pkt_len;
ab7db917 154
fb51879d
MD
155 /* Page frag for packet buffer allocation. */
156 struct page_frag alloc_frag;
157
e9d7417b
JW
158 /* RX: fragments + linear part + virtio header */
159 struct scatterlist sg[MAX_SKB_FRAGS + 2];
986a4f4d 160
d85b758f
MT
161 /* Min single buffer size for mergeable buffers case. */
162 unsigned int min_buf_len;
163
986a4f4d
JW
164 /* Name of this receive queue: input.$index */
165 char name[40];
754b8a21
JDB
166
167 struct xdp_rxq_info xdp_rxq;
e9d7417b
JW
168};
169
12e57169
MT
170/* Control VQ buffers: protected by the rtnl lock */
171struct control_buf {
172 struct virtio_net_ctrl_hdr hdr;
173 virtio_net_ctrl_ack status;
174 struct virtio_net_ctrl_mq mq;
175 u8 promisc;
176 u8 allmulti;
d7fad4c8 177 __virtio16 vid;
f4ee703a 178 __virtio64 offloads;
12e57169
MT
179};
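/*
 * Sketch of how these buffers are used (illustrative, mirroring the
 * multiqueue setter later in this file): a field of *ctrl is filled in,
 * wrapped in a scatterlist and handed to virtnet_send_command(), e.g.
 *
 *	struct scatterlist sg;
 *
 *	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
 *	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
 *	virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 *			     VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg);
 *
 * The buffers live in *ctrl rather than on the stack because the device
 * reads them via DMA; the rtnl lock serializes users as noted above.
 */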
180
e9d7417b
JW
181struct virtnet_info {
182 struct virtio_device *vdev;
183 struct virtqueue *cvq;
184 struct net_device *dev;
986a4f4d
JW
185 struct send_queue *sq;
186 struct receive_queue *rq;
e9d7417b
JW
187 unsigned int status;
188
986a4f4d
JW
189 /* Max # of queue pairs supported by the device */
190 u16 max_queue_pairs;
191
192 /* # of queue pairs currently used by the driver */
193 u16 curr_queue_pairs;
194
672aafd5
JF
195 /* # of XDP queue pairs currently used by the driver */
196 u16 xdp_queue_pairs;
197
97c2c69e
XZ
198 /* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
199 bool xdp_enabled;
200
97402b96
HX
201 /* I like... big packets and I cannot lie! */
202 bool big_packets;
203
3f2c31d9
MM
204 /* Host will merge rx buffers for big packets (shake it! shake it!) */
205 bool mergeable_rx_bufs;
206
986a4f4d
JW
207 /* Has control virtqueue */
208 bool has_cvq;
209
e7428e95
MT
210 /* Host can handle any s/g split between our header and packet data */
211 bool any_header_sg;
212
012873d0
MT
213 /* Packet virtio header size */
214 u8 hdr_len;
215
3161e453
RR
216 /* Work struct for refilling if we run low on memory. */
217 struct delayed_work refill;
218
586d17c5
JW
219 /* Work struct for config space updates */
220 struct work_struct config_work;
221
986a4f4d
JW
 222 /* Is the affinity hint set for the virtqueues? */
223 bool affinity_hint_set;
47be2479 224
8017c279
SAS
225 /* CPU hotplug instances for online & dead */
226 struct hlist_node node;
227 struct hlist_node node_dead;
2ac46030 228
12e57169 229 struct control_buf *ctrl;
16032be5
NA
230
231 /* Ethtool settings */
232 u8 duplex;
233 u32 speed;
3f93522f
JW
234
235 unsigned long guest_offloads;
a02e8964 236 unsigned long guest_offloads_capable;
ba5e4426
SS
237
238 /* failover when STANDBY feature enabled */
239 struct failover *failover;
296f96fc
RR
240};
241
9ab86bbc 242struct padded_vnet_hdr {
012873d0 243 struct virtio_net_hdr_mrg_rxbuf hdr;
9ab86bbc 244 /*
012873d0
MT
245 * hdr is in a separate sg buffer, and data sg buffer shares same page
246 * with this header sg. This padding makes next sg 16 byte aligned
247 * after the header.
9ab86bbc 248 */
012873d0 249 char padding[4];
9ab86bbc
SM
250};
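/*
 * The arithmetic behind the padding (assuming the standard virtio-net
 * header layout): struct virtio_net_hdr_mrg_rxbuf is 12 bytes (10 bytes of
 * virtio_net_hdr plus the 2 byte num_buffers field), so 4 bytes of padding
 * round the header sg entry up to 16 bytes and keep the data sg entry that
 * shares the page 16 byte aligned, i.e.
 *
 *	BUILD_BUG_ON(sizeof(struct padded_vnet_hdr) != 16);
 *
 * would hold.
 */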
251
5050471d
TM
252static bool is_xdp_frame(void *ptr)
253{
254 return (unsigned long)ptr & VIRTIO_XDP_FLAG;
255}
256
257static void *xdp_to_ptr(struct xdp_frame *ptr)
258{
259 return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
260}
261
262static struct xdp_frame *ptr_to_xdp(void *ptr)
263{
264 return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
265}
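/*
 * Illustrative round trip for the pointer tagging above (not extra driver
 * code): xdp_frame and sk_buff pointers are at least word aligned, so bit 0
 * is free to mark which kind of buffer was queued on the tx ring.  For a
 * hypothetical struct xdp_frame *xdpf:
 *
 *	void *ptr = xdp_to_ptr(xdpf);
 *
 *	is_xdp_frame(ptr)  -> true
 *	ptr_to_xdp(ptr)    -> xdpf
 *	is_xdp_frame(skb)  -> false for a plain sk_buff pointer
 */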
266
986a4f4d
JW
267/* Converting between virtqueue no. and kernel tx/rx queue no.
268 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
269 */
270static int vq2txq(struct virtqueue *vq)
271{
9d0ca6ed 272 return (vq->index - 1) / 2;
986a4f4d
JW
273}
274
275static int txq2vq(int txq)
276{
277 return txq * 2 + 1;
278}
279
280static int vq2rxq(struct virtqueue *vq)
281{
9d0ca6ed 282 return vq->index / 2;
986a4f4d
JW
283}
284
285static int rxq2vq(int rxq)
286{
287 return rxq * 2;
288}
289
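/*
 * Example of the numbering the four helpers above implement (illustrative
 * only): with the layout 0:rx0 1:tx0 2:rx1 3:tx1 ..., queue pair 3 uses
 *
 *	rxq2vq(3) == 6,  vq2rxq(vq with index 6) == 3
 *	txq2vq(3) == 7,  vq2txq(vq with index 7) == 3
 *
 * and the control virtqueue, when present, is the last one after all the
 * rx/tx pairs.
 */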
012873d0 290static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
296f96fc 291{
012873d0 292 return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
296f96fc
RR
293}
294
9ab86bbc
SM
295/*
 296 * private is used to chain pages for big packets; the most recently
 297 * used list is put at the front so it is reused first
298 */
e9d7417b 299static void give_pages(struct receive_queue *rq, struct page *page)
0a888fd1 300{
9ab86bbc 301 struct page *end;
0a888fd1 302
e9d7417b 303 /* Find end of list, sew whole thing into vi->rq.pages. */
9ab86bbc 304 for (end = page; end->private; end = (struct page *)end->private);
e9d7417b
JW
305 end->private = (unsigned long)rq->pages;
306 rq->pages = page;
0a888fd1
MM
307}
308
e9d7417b 309static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
fb6813f4 310{
e9d7417b 311 struct page *p = rq->pages;
fb6813f4 312
9ab86bbc 313 if (p) {
e9d7417b 314 rq->pages = (struct page *)p->private;
9ab86bbc
SM
315 /* clear private here, it is used to chain pages */
316 p->private = 0;
317 } else
fb6813f4
RR
318 p = alloc_page(gfp_mask);
319 return p;
320}
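/*
 * Illustrative recycle cycle for the two helpers above (not extra driver
 * code): give_pages() pushes a chain of pages onto rq->pages using
 * page->private as the next pointer, and get_a_page() pops from that list,
 * falling back to alloc_page() when it is empty:
 *
 *	struct page *p = get_a_page(rq, GFP_ATOMIC);	// reuse or allocate
 *	// ... use p as part of a big-packet receive buffer ...
 *	give_pages(rq, p);				// return it for reuse
 */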
321
e4e8452a
WB
322static void virtqueue_napi_schedule(struct napi_struct *napi,
323 struct virtqueue *vq)
324{
325 if (napi_schedule_prep(napi)) {
326 virtqueue_disable_cb(vq);
327 __napi_schedule(napi);
328 }
329}
330
331static void virtqueue_napi_complete(struct napi_struct *napi,
332 struct virtqueue *vq, int processed)
333{
334 int opaque;
335
336 opaque = virtqueue_enable_cb_prepare(vq);
fdaa767a
TM
337 if (napi_complete_done(napi, processed)) {
338 if (unlikely(virtqueue_poll(vq, opaque)))
339 virtqueue_napi_schedule(napi, vq);
340 } else {
341 virtqueue_disable_cb(vq);
342 }
e4e8452a
WB
343}
344
e9d7417b 345static void skb_xmit_done(struct virtqueue *vq)
296f96fc 346{
e9d7417b 347 struct virtnet_info *vi = vq->vdev->priv;
b92f1e67 348 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
296f96fc 349
2cb9c6ba 350 /* Suppress further interrupts. */
e9d7417b 351 virtqueue_disable_cb(vq);
11a3a154 352
b92f1e67
WB
353 if (napi->weight)
354 virtqueue_napi_schedule(napi, vq);
355 else
356 /* We were probably waiting for more output buffers. */
357 netif_wake_subqueue(vi->dev, vq2txq(vq));
296f96fc
RR
358}
359
28b39bc7
JW
360#define MRG_CTX_HEADER_SHIFT 22
361static void *mergeable_len_to_ctx(unsigned int truesize,
362 unsigned int headroom)
363{
364 return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
365}
366
367static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
368{
369 return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
370}
371
372static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
373{
374 return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
375}
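/*
 * Illustrative round trip for the mergeable-buffer context above (not
 * extra driver code): the headroom lives in the high bits and the truesize
 * in the low MRG_CTX_HEADER_SHIFT (22) bits, so e.g. truesize 1536 with
 * 256 bytes of headroom encodes and decodes as
 *
 *	void *ctx = mergeable_len_to_ctx(1536, 256);
 *
 *	mergeable_ctx_to_truesize(ctx) == 1536
 *	mergeable_ctx_to_headroom(ctx) == 256
 *
 * 22 bits are plenty for any truesize up to a page.
 */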
376
3464645a 377/* Called from bottom half context */
946fa564
MT
378static struct sk_buff *page_to_skb(struct virtnet_info *vi,
379 struct receive_queue *rq,
2613af0e 380 struct page *page, unsigned int offset,
436c9453 381 unsigned int len, unsigned int truesize,
fb32856b 382 bool hdr_valid, unsigned int metasize,
6c66c147 383 bool whole_page)
9ab86bbc
SM
384{
385 struct sk_buff *skb;
012873d0 386 struct virtio_net_hdr_mrg_rxbuf *hdr;
2613af0e 387 unsigned int copy, hdr_len, hdr_padded_len;
af39c8f7 388 struct page *page_to_free = NULL;
fb32856b 389 int tailroom, shinfo_size;
f80bd740 390 char *p, *hdr_p, *buf;
fb6813f4 391
2613af0e 392 p = page_address(page) + offset;
fb32856b 393 hdr_p = p;
3f2c31d9 394
012873d0
MT
395 hdr_len = vi->hdr_len;
396 if (vi->mergeable_rx_bufs)
a4a76503 397 hdr_padded_len = sizeof(*hdr);
012873d0 398 else
2613af0e 399 hdr_padded_len = sizeof(struct padded_vnet_hdr);
3f2c31d9 400
6c66c147 401 /* If whole_page, there is an offset between the beginning of the
fb32856b
XZ
402 * data and the allocated space, otherwise the data and the allocated
403 * space are aligned.
8fb7da9e
XZ
404 *
405 * Buffers with headroom use PAGE_SIZE as alloc size, see
406 * add_recvbuf_mergeable() + get_mergeable_buf_len()
fb32856b 407 */
6c66c147
XZ
408 if (whole_page) {
409 /* Buffers with whole_page use PAGE_SIZE as alloc size,
f80bd740
XZ
410 * see add_recvbuf_mergeable() + get_mergeable_buf_len()
411 */
fb32856b 412 truesize = PAGE_SIZE;
7bf64460
XZ
413
 414 /* page may be the head of a compound page while p points into a tail
 415 * page, so derive buf from p rather than from the page pointer
416 */
417 tailroom = truesize - len - offset_in_page(p);
418 buf = (char *)((unsigned long)p & PAGE_MASK);
fb32856b
XZ
419 } else {
420 tailroom = truesize - len;
f80bd740 421 buf = p;
fb32856b 422 }
3f2c31d9 423
9ab86bbc 424 len -= hdr_len;
2613af0e
MD
425 offset += hdr_padded_len;
426 p += hdr_padded_len;
3f2c31d9 427
fb32856b
XZ
428 shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
429
f80bd740 430 /* if the whole packet plus shared info fits in the buffer, build the skb around it and skip the copy */
f5d7872a 431 if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
f80bd740 432 skb = build_skb(buf, truesize);
fb32856b
XZ
433 if (unlikely(!skb))
434 return NULL;
435
f80bd740 436 skb_reserve(skb, p - buf);
fb32856b
XZ
437 skb_put(skb, len);
438 goto ok;
439 }
440
441 /* copy small packet so we can reuse these pages for small data */
442 skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
443 if (unlikely(!skb))
444 return NULL;
445
0f6925b3
ED
446 /* Copy all frame if it fits skb->head, otherwise
447 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
448 */
449 if (len <= skb_tailroom(skb))
450 copy = len;
451 else
452 copy = ETH_HLEN + metasize;
59ae1d12 453 skb_put_data(skb, p, copy);
3f2c31d9 454
9ab86bbc
SM
455 len -= copy;
456 offset += copy;
3f2c31d9 457
2613af0e
MD
458 if (vi->mergeable_rx_bufs) {
459 if (len)
460 skb_add_rx_frag(skb, 0, page, offset, len, truesize);
461 else
af39c8f7 462 page_to_free = page;
fb32856b 463 goto ok;
2613af0e
MD
464 }
465
e878d78b
SL
466 /*
467 * Verify that we can indeed put this data into a skb.
468 * This is here to handle cases when the device erroneously
469 * tries to receive more than is possible. This is usually
470 * the case of a broken device.
471 */
472 if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
be443899 473 net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
e878d78b
SL
474 dev_kfree_skb(skb);
475 return NULL;
476 }
2613af0e 477 BUG_ON(offset >= PAGE_SIZE);
9ab86bbc 478 while (len) {
2613af0e
MD
479 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
480 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
481 frag_size, truesize);
482 len -= frag_size;
9ab86bbc
SM
483 page = (struct page *)page->private;
484 offset = 0;
485 }
3f2c31d9 486
9ab86bbc 487 if (page)
e9d7417b 488 give_pages(rq, page);
3f2c31d9 489
fb32856b
XZ
490ok:
491 /* hdr_valid means no XDP, so we can copy the vnet header */
492 if (hdr_valid) {
493 hdr = skb_vnet_hdr(skb);
494 memcpy(hdr, hdr_p, hdr_len);
495 }
af39c8f7
ED
496 if (page_to_free)
497 put_page(page_to_free);
fb32856b
XZ
498
499 if (metasize) {
500 __skb_pull(skb, metasize);
501 skb_metadata_set(skb, metasize);
502 }
503
9ab86bbc
SM
504 return skb;
505}
3f2c31d9 506
735fc405
JDB
507static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
508 struct send_queue *sq,
509 struct xdp_frame *xdpf)
56434a01 510{
56434a01 511 struct virtio_net_hdr_mrg_rxbuf *hdr;
56434a01
JF
512 int err;
513
cac320c8
JDB
514 if (unlikely(xdpf->headroom < vi->hdr_len))
515 return -EOVERFLOW;
516
517 /* Make room for virtqueue hdr (also change xdpf->headroom?) */
518 xdpf->data -= vi->hdr_len;
f6b10209 519 /* Zero header and leave csum up to XDP layers */
cac320c8 520 hdr = xdpf->data;
f6b10209 521 memset(hdr, 0, vi->hdr_len);
cac320c8 522 xdpf->len += vi->hdr_len;
bb91accf 523
cac320c8 524 sg_init_one(sq->sg, xdpf->data, xdpf->len);
bb91accf 525
5050471d
TM
526 err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
527 GFP_ATOMIC);
11b7d897 528 if (unlikely(err))
cac320c8 529 return -ENOSPC; /* Caller handle free/refcnt */
56434a01 530
cac320c8 531 return 0;
56434a01
JF
532}
533
97c2c69e
XZ
 534/* When vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for XDP tx on
 535 * the current cpu, so it does not need to be locked.
 536 *
 537 * Here we use a macro instead of inline functions because we have to deal
 538 * with three issues at the same time: 1. the choice of sq, 2. deciding whether
 539 * to take and release the txq lock, and 3. keeping sparse happy. It is hard
 540 * for a pair of inline functions to solve all three cleanly at once.
541 */
542#define virtnet_xdp_get_sq(vi) ({ \
543 struct netdev_queue *txq; \
544 typeof(vi) v = (vi); \
545 unsigned int qp; \
546 \
547 if (v->curr_queue_pairs > nr_cpu_ids) { \
548 qp = v->curr_queue_pairs - v->xdp_queue_pairs; \
549 qp += smp_processor_id(); \
550 txq = netdev_get_tx_queue(v->dev, qp); \
551 __netif_tx_acquire(txq); \
552 } else { \
553 qp = smp_processor_id() % v->curr_queue_pairs; \
554 txq = netdev_get_tx_queue(v->dev, qp); \
555 __netif_tx_lock(txq, raw_smp_processor_id()); \
556 } \
557 v->sq + qp; \
558})
559
560#define virtnet_xdp_put_sq(vi, q) { \
561 struct netdev_queue *txq; \
562 typeof(vi) v = (vi); \
563 \
564 txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
565 if (v->curr_queue_pairs > nr_cpu_ids) \
566 __netif_tx_release(txq); \
567 else \
568 __netif_tx_unlock(txq); \
2a43565c
TM
569}
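/*
 * Illustrative usage of the two macros above (a sketch, not a verbatim copy
 * of the driver paths): every XDP transmit path brackets its work with
 * get/put so the txq lock is only taken when CPUs outnumber queues:
 *
 *	struct send_queue *sq = virtnet_xdp_get_sq(vi);
 *
 *	// queue frames with __virtnet_xdp_xmit_one(vi, sq, xdpf) ...
 *	if (flush)
 *		virtqueue_kick(sq->vq);
 *	virtnet_xdp_put_sq(vi, sq);
 */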
570
735fc405 571static int virtnet_xdp_xmit(struct net_device *dev,
42b33468 572 int n, struct xdp_frame **frames, u32 flags)
186b3c99
JW
573{
574 struct virtnet_info *vi = netdev_priv(dev);
8dcc5b0a
JDB
575 struct receive_queue *rq = vi->rq;
576 struct bpf_prog *xdp_prog;
735fc405
JDB
577 struct send_queue *sq;
578 unsigned int len;
546f2897
TM
579 int packets = 0;
580 int bytes = 0;
fdc13979 581 int nxmit = 0;
461f03dc 582 int kicks = 0;
5050471d 583 void *ptr;
fdc13979 584 int ret;
735fc405
JDB
585 int i;
586
8dcc5b0a
JDB
587 /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
588 * indicate XDP resources have been successfully allocated.
589 */
9719c6b9 590 xdp_prog = rcu_access_pointer(rq->xdp_prog);
1667c08a
TM
591 if (!xdp_prog)
592 return -ENXIO;
593
97c2c69e 594 sq = virtnet_xdp_get_sq(vi);
1667c08a
TM
595
596 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
597 ret = -EINVAL;
5b8f3c8d
TM
598 goto out;
599 }
8dcc5b0a 600
735fc405 601 /* Free up any pending old buffers before queueing new ones. */
5050471d 602 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
546f2897
TM
603 if (likely(is_xdp_frame(ptr))) {
604 struct xdp_frame *frame = ptr_to_xdp(ptr);
605
606 bytes += frame->len;
607 xdp_return_frame(frame);
608 } else {
609 struct sk_buff *skb = ptr;
610
611 bytes += skb->len;
612 napi_consume_skb(skb, false);
613 }
614 packets++;
5050471d 615 }
735fc405
JDB
616
617 for (i = 0; i < n; i++) {
618 struct xdp_frame *xdpf = frames[i];
619
fdc13979
LB
620 if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
621 break;
622 nxmit++;
735fc405 623 }
fdc13979 624 ret = nxmit;
5d274cb4 625
461f03dc
TM
626 if (flags & XDP_XMIT_FLUSH) {
627 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
628 kicks = 1;
629 }
5b8f3c8d
TM
630out:
631 u64_stats_update_begin(&sq->stats.syncp);
546f2897
TM
632 sq->stats.bytes += bytes;
633 sq->stats.packets += packets;
5b8f3c8d 634 sq->stats.xdp_tx += n;
fdc13979 635 sq->stats.xdp_tx_drops += n - nxmit;
461f03dc 636 sq->stats.kicks += kicks;
5b8f3c8d 637 u64_stats_update_end(&sq->stats.syncp);
5d274cb4 638
97c2c69e 639 virtnet_xdp_put_sq(vi, sq);
5b8f3c8d 640 return ret;
186b3c99
JW
641}
642
f6b10209
JW
643static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
644{
97c2c69e 645 return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
f6b10209
JW
646}
647
4941d472
JW
648/* We copy the packet for XDP in the following cases:
649 *
650 * 1) Packet is scattered across multiple rx buffers.
651 * 2) Headroom space is insufficient.
652 *
653 * This is inefficient but it's a temporary condition that
654 * we hit right after XDP is enabled and until queue is refilled
655 * with large buffers with sufficient headroom - so it should affect
656 * at most queue size packets.
657 * Afterwards, the conditions to enable
658 * XDP should preclude the underlying device from sending packets
659 * across multiple buffers (num_buf > 1), and we make sure buffers
660 * have enough headroom.
661 */
662static struct page *xdp_linearize_page(struct receive_queue *rq,
663 u16 *num_buf,
664 struct page *p,
665 int offset,
666 int page_off,
667 unsigned int *len)
668{
669 struct page *page = alloc_page(GFP_ATOMIC);
670
671 if (!page)
672 return NULL;
673
674 memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
675 page_off += *len;
676
677 while (--*num_buf) {
3cc81a9a 678 int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4941d472
JW
679 unsigned int buflen;
680 void *buf;
681 int off;
682
683 buf = virtqueue_get_buf(rq->vq, &buflen);
684 if (unlikely(!buf))
685 goto err_buf;
686
687 p = virt_to_head_page(buf);
688 off = buf - page_address(p);
689
690 /* guard against a misconfigured or uncooperative backend that
691 * is sending packet larger than the MTU.
692 */
3cc81a9a 693 if ((page_off + buflen + tailroom) > PAGE_SIZE) {
4941d472
JW
694 put_page(p);
695 goto err_buf;
696 }
697
698 memcpy(page_address(page) + page_off,
699 page_address(p) + off, buflen);
700 page_off += buflen;
701 put_page(p);
702 }
703
704 /* Headroom does not contribute to packet length */
705 *len = page_off - VIRTIO_XDP_HEADROOM;
706 return page;
707err_buf:
708 __free_pages(page, 0);
709 return NULL;
710}
711
bb91accf
JW
712static struct sk_buff *receive_small(struct net_device *dev,
713 struct virtnet_info *vi,
714 struct receive_queue *rq,
192f68cf 715 void *buf, void *ctx,
186b3c99 716 unsigned int len,
7d9d60fd 717 unsigned int *xdp_xmit,
d46eeeaf 718 struct virtnet_rq_stats *stats)
f121159d 719{
f6b10209 720 struct sk_buff *skb;
bb91accf 721 struct bpf_prog *xdp_prog;
4941d472 722 unsigned int xdp_headroom = (unsigned long)ctx;
f6b10209
JW
723 unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
724 unsigned int headroom = vi->hdr_len + header_offset;
725 unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
726 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4941d472 727 struct page *page = virt_to_head_page(buf);
11b7d897 728 unsigned int delta = 0;
4941d472 729 struct page *xdp_page;
11b7d897 730 int err;
503d539a 731 unsigned int metasize = 0;
11b7d897 732
012873d0 733 len -= vi->hdr_len;
d46eeeaf 734 stats->bytes += len;
f121159d 735
ad993a95
XY
736 if (unlikely(len > GOOD_PACKET_LEN)) {
737 pr_debug("%s: rx error: len %u exceeds max size %d\n",
738 dev->name, len, GOOD_PACKET_LEN);
739 dev->stats.rx_length_errors++;
740 goto err_len;
741 }
bb91accf
JW
742 rcu_read_lock();
743 xdp_prog = rcu_dereference(rq->xdp_prog);
744 if (xdp_prog) {
f6b10209 745 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
44fa2dbd 746 struct xdp_frame *xdpf;
0354e4d1 747 struct xdp_buff xdp;
f6b10209 748 void *orig_data;
bb91accf
JW
749 u32 act;
750
95dbe9e7 751 if (unlikely(hdr->hdr.gso_type))
bb91accf 752 goto err_xdp;
0354e4d1 753
4941d472
JW
754 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
755 int offset = buf - page_address(page) + header_offset;
756 unsigned int tlen = len + vi->hdr_len;
757 u16 num_buf = 1;
758
759 xdp_headroom = virtnet_get_headroom(vi);
760 header_offset = VIRTNET_RX_PAD + xdp_headroom;
761 headroom = vi->hdr_len + header_offset;
762 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
763 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
764 xdp_page = xdp_linearize_page(rq, &num_buf, page,
765 offset, header_offset,
766 &tlen);
767 if (!xdp_page)
768 goto err_xdp;
769
770 buf = page_address(xdp_page);
771 put_page(page);
772 page = xdp_page;
773 }
774
43b5169d 775 xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
be9df4af
LB
776 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
777 xdp_headroom, len, true);
f6b10209 778 orig_data = xdp.data;
0354e4d1 779 act = bpf_prog_run_xdp(xdp_prog, &xdp);
d46eeeaf 780 stats->xdp_packets++;
0354e4d1 781
bb91accf
JW
782 switch (act) {
783 case XDP_PASS:
2de2f7f4 784 /* Recalculate length in case bpf program changed it */
f6b10209 785 delta = orig_data - xdp.data;
6870de43 786 len = xdp.data_end - xdp.data;
503d539a 787 metasize = xdp.data - xdp.data_meta;
bb91accf
JW
788 break;
789 case XDP_TX:
d46eeeaf 790 stats->xdp_tx++;
1b698fa5 791 xdpf = xdp_convert_buff_to_frame(&xdp);
44fa2dbd
JDB
792 if (unlikely(!xdpf))
793 goto err_xdp;
ca9e83b4 794 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
fdc13979
LB
795 if (unlikely(!err)) {
796 xdp_return_frame_rx_napi(xdpf);
797 } else if (unlikely(err < 0)) {
0354e4d1 798 trace_xdp_exception(vi->dev, xdp_prog, act);
11b7d897
JDB
799 goto err_xdp;
800 }
2471c75e 801 *xdp_xmit |= VIRTIO_XDP_TX;
186b3c99
JW
802 rcu_read_unlock();
803 goto xdp_xmit;
804 case XDP_REDIRECT:
d46eeeaf 805 stats->xdp_redirects++;
186b3c99 806 err = xdp_do_redirect(dev, &xdp, xdp_prog);
11b7d897
JDB
807 if (err)
808 goto err_xdp;
2471c75e 809 *xdp_xmit |= VIRTIO_XDP_REDIR;
bb91accf
JW
810 rcu_read_unlock();
811 goto xdp_xmit;
bb91accf 812 default:
0354e4d1 813 bpf_warn_invalid_xdp_action(act);
df561f66 814 fallthrough;
0354e4d1
JF
815 case XDP_ABORTED:
816 trace_xdp_exception(vi->dev, xdp_prog, act);
95efabf0 817 goto err_xdp;
0354e4d1 818 case XDP_DROP:
bb91accf
JW
819 goto err_xdp;
820 }
821 }
822 rcu_read_unlock();
823
f6b10209
JW
824 skb = build_skb(buf, buflen);
825 if (!skb) {
4941d472 826 put_page(page);
f6b10209
JW
827 goto err;
828 }
829 skb_reserve(skb, headroom - delta);
6870de43 830 skb_put(skb, len);
f1d4884d 831 if (!xdp_prog) {
f6b10209
JW
832 buf += header_offset;
833 memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
f1d4884d 834 } /* keep zeroed vnet hdr since XDP is loaded */
f6b10209 835
503d539a
YK
836 if (metasize)
837 skb_metadata_set(skb, metasize);
838
f6b10209 839err:
f121159d 840 return skb;
bb91accf
JW
841
842err_xdp:
843 rcu_read_unlock();
d46eeeaf 844 stats->xdp_drops++;
ad993a95 845err_len:
d46eeeaf 846 stats->drops++;
4941d472 847 put_page(page);
bb91accf
JW
848xdp_xmit:
849 return NULL;
f121159d
MT
850}
851
852static struct sk_buff *receive_big(struct net_device *dev,
946fa564 853 struct virtnet_info *vi,
f121159d
MT
854 struct receive_queue *rq,
855 void *buf,
7d9d60fd 856 unsigned int len,
d46eeeaf 857 struct virtnet_rq_stats *stats)
f121159d
MT
858{
859 struct page *page = buf;
503d539a 860 struct sk_buff *skb =
fb32856b 861 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0, 0);
f600b690 862
d46eeeaf 863 stats->bytes += len - vi->hdr_len;
f121159d
MT
864 if (unlikely(!skb))
865 goto err;
866
867 return skb;
868
869err:
d46eeeaf 870 stats->drops++;
f121159d
MT
871 give_pages(rq, page);
872 return NULL;
873}
874
8fc3b9e9 875static struct sk_buff *receive_mergeable(struct net_device *dev,
fdd819b2 876 struct virtnet_info *vi,
8fc3b9e9 877 struct receive_queue *rq,
680557cf
MT
878 void *buf,
879 void *ctx,
186b3c99 880 unsigned int len,
7d9d60fd 881 unsigned int *xdp_xmit,
d46eeeaf 882 struct virtnet_rq_stats *stats)
9ab86bbc 883{
012873d0
MT
884 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
885 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
8fc3b9e9
MT
886 struct page *page = virt_to_head_page(buf);
887 int offset = buf - page_address(page);
f600b690
JF
888 struct sk_buff *head_skb, *curr_skb;
889 struct bpf_prog *xdp_prog;
9ce6146e 890 unsigned int truesize = mergeable_ctx_to_truesize(ctx);
4941d472 891 unsigned int headroom = mergeable_ctx_to_headroom(ctx);
503d539a 892 unsigned int metasize = 0;
9ce6146e
JDB
893 unsigned int frame_sz;
894 int err;
f600b690 895
56434a01 896 head_skb = NULL;
d46eeeaf 897 stats->bytes += len - vi->hdr_len;
56434a01 898
ad993a95
XY
899 if (unlikely(len > truesize)) {
900 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
901 dev->name, len, (unsigned long)ctx);
902 dev->stats.rx_length_errors++;
903 goto err_skb;
904 }
f600b690
JF
905 rcu_read_lock();
906 xdp_prog = rcu_dereference(rq->xdp_prog);
907 if (xdp_prog) {
44fa2dbd 908 struct xdp_frame *xdpf;
72979a6c 909 struct page *xdp_page;
0354e4d1 910 struct xdp_buff xdp;
0354e4d1 911 void *data;
f600b690
JF
912 u32 act;
913
3d62b2a0
JW
914 /* Transient failure which in theory could occur if
915 * in-flight packets from before XDP was enabled reach
916 * the receive path after XDP is loaded.
917 */
918 if (unlikely(hdr->hdr.gso_type))
919 goto err_xdp;
920
9ce6146e
JDB
921 /* Buffers with headroom use PAGE_SIZE as alloc size,
922 * see add_recvbuf_mergeable() + get_mergeable_buf_len()
923 */
924 frame_sz = headroom ? PAGE_SIZE : truesize;
925
3cc81a9a
JW
 926 /* This happens when the rx buffer size was underestimated
 927 * or the headroom is insufficient because the buffer
 928 * was refilled before XDP was set. It should only
 929 * happen for the first several packets, so we don't
 930 * care much about performance here.
931 */
4941d472
JW
932 if (unlikely(num_buf > 1 ||
933 headroom < virtnet_get_headroom(vi))) {
72979a6c 934 /* linearize data for XDP */
56a86f84 935 xdp_page = xdp_linearize_page(rq, &num_buf,
4941d472
JW
936 page, offset,
937 VIRTIO_XDP_HEADROOM,
938 &len);
9ce6146e
JDB
939 frame_sz = PAGE_SIZE;
940
72979a6c
JF
941 if (!xdp_page)
942 goto err_xdp;
2de2f7f4 943 offset = VIRTIO_XDP_HEADROOM;
72979a6c
JF
944 } else {
945 xdp_page = page;
f600b690
JF
946 }
947
2de2f7f4
JF
948 /* Allow consuming headroom but reserve enough space to push
949 * the descriptor on if we get an XDP_TX return code.
950 */
0354e4d1 951 data = page_address(xdp_page) + offset;
43b5169d 952 xdp_init_buff(&xdp, frame_sz - vi->hdr_len, &rq->xdp_rxq);
be9df4af
LB
953 xdp_prepare_buff(&xdp, data - VIRTIO_XDP_HEADROOM + vi->hdr_len,
954 VIRTIO_XDP_HEADROOM, len - vi->hdr_len, true);
754b8a21 955
0354e4d1 956 act = bpf_prog_run_xdp(xdp_prog, &xdp);
d46eeeaf 957 stats->xdp_packets++;
0354e4d1 958
56434a01
JF
959 switch (act) {
960 case XDP_PASS:
503d539a
YK
961 metasize = xdp.data - xdp.data_meta;
962
2de2f7f4 963 /* recalculate offset to account for any header
503d539a
YK
964 * adjustments and minus the metasize to copy the
965 * metadata in page_to_skb(). Note other cases do not
966 * build an skb and avoid using offset
2de2f7f4 967 */
503d539a
YK
968 offset = xdp.data - page_address(xdp_page) -
969 vi->hdr_len - metasize;
2de2f7f4 970
503d539a
YK
971 /* recalculate len if xdp.data, xdp.data_end or
972 * xdp.data_meta were adjusted
6870de43 973 */
503d539a 974 len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
1830f893
JW
975 /* We can only create skb based on xdp_page. */
976 if (unlikely(xdp_page != page)) {
977 rcu_read_unlock();
978 put_page(page);
503d539a
YK
979 head_skb = page_to_skb(vi, rq, xdp_page, offset,
980 len, PAGE_SIZE, false,
6c66c147 981 metasize, true);
1830f893
JW
982 return head_skb;
983 }
56434a01
JF
984 break;
985 case XDP_TX:
d46eeeaf 986 stats->xdp_tx++;
1b698fa5 987 xdpf = xdp_convert_buff_to_frame(&xdp);
44fa2dbd
JDB
988 if (unlikely(!xdpf))
989 goto err_xdp;
ca9e83b4 990 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
fdc13979
LB
991 if (unlikely(!err)) {
992 xdp_return_frame_rx_napi(xdpf);
993 } else if (unlikely(err < 0)) {
0354e4d1 994 trace_xdp_exception(vi->dev, xdp_prog, act);
11b7d897
JDB
995 if (unlikely(xdp_page != page))
996 put_page(xdp_page);
997 goto err_xdp;
998 }
2471c75e 999 *xdp_xmit |= VIRTIO_XDP_TX;
72979a6c 1000 if (unlikely(xdp_page != page))
5d458a13 1001 put_page(page);
56434a01
JF
1002 rcu_read_unlock();
1003 goto xdp_xmit;
3cc81a9a 1004 case XDP_REDIRECT:
d46eeeaf 1005 stats->xdp_redirects++;
3cc81a9a
JW
1006 err = xdp_do_redirect(dev, &xdp, xdp_prog);
1007 if (err) {
1008 if (unlikely(xdp_page != page))
1009 put_page(xdp_page);
1010 goto err_xdp;
1011 }
2471c75e 1012 *xdp_xmit |= VIRTIO_XDP_REDIR;
3cc81a9a 1013 if (unlikely(xdp_page != page))
6890418b 1014 put_page(page);
3cc81a9a
JW
1015 rcu_read_unlock();
1016 goto xdp_xmit;
56434a01 1017 default:
0354e4d1 1018 bpf_warn_invalid_xdp_action(act);
df561f66 1019 fallthrough;
0354e4d1
JF
1020 case XDP_ABORTED:
1021 trace_xdp_exception(vi->dev, xdp_prog, act);
df561f66 1022 fallthrough;
0354e4d1 1023 case XDP_DROP:
72979a6c
JF
1024 if (unlikely(xdp_page != page))
1025 __free_pages(xdp_page, 0);
f600b690 1026 goto err_xdp;
56434a01 1027 }
f600b690
JF
1028 }
1029 rcu_read_unlock();
ab7db917 1030
503d539a 1031 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
6c66c147 1032 metasize, !!headroom);
f600b690 1033 curr_skb = head_skb;
9ab86bbc 1034
8fc3b9e9
MT
1035 if (unlikely(!curr_skb))
1036 goto err_skb;
9ab86bbc 1037 while (--num_buf) {
8fc3b9e9
MT
1038 int num_skb_frags;
1039
680557cf 1040 buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
03e9f8a0 1041 if (unlikely(!buf)) {
8fc3b9e9 1042 pr_debug("%s: rx error: %d buffers out of %d missing\n",
fdd819b2 1043 dev->name, num_buf,
012873d0
MT
1044 virtio16_to_cpu(vi->vdev,
1045 hdr->num_buffers));
8fc3b9e9
MT
1046 dev->stats.rx_length_errors++;
1047 goto err_buf;
3f2c31d9 1048 }
8fc3b9e9 1049
d46eeeaf 1050 stats->bytes += len;
8fc3b9e9 1051 page = virt_to_head_page(buf);
28b39bc7
JW
1052
1053 truesize = mergeable_ctx_to_truesize(ctx);
1054 if (unlikely(len > truesize)) {
56da5fd0 1055 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
680557cf
MT
1056 dev->name, len, (unsigned long)ctx);
1057 dev->stats.rx_length_errors++;
1058 goto err_skb;
1059 }
8fc3b9e9
MT
1060
1061 num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
2613af0e
MD
1062 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
1063 struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
8fc3b9e9
MT
1064
1065 if (unlikely(!nskb))
1066 goto err_skb;
2613af0e
MD
1067 if (curr_skb == head_skb)
1068 skb_shinfo(curr_skb)->frag_list = nskb;
1069 else
1070 curr_skb->next = nskb;
1071 curr_skb = nskb;
1072 head_skb->truesize += nskb->truesize;
1073 num_skb_frags = 0;
1074 }
1075 if (curr_skb != head_skb) {
1076 head_skb->data_len += len;
1077 head_skb->len += len;
fb51879d 1078 head_skb->truesize += truesize;
2613af0e 1079 }
8fc3b9e9 1080 offset = buf - page_address(page);
ba275241
JW
1081 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
1082 put_page(page);
1083 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
fb51879d 1084 len, truesize);
ba275241
JW
1085 } else {
1086 skb_add_rx_frag(curr_skb, num_skb_frags, page,
fb51879d 1087 offset, len, truesize);
ba275241 1088 }
8fc3b9e9
MT
1089 }
1090
5377d758 1091 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
8fc3b9e9
MT
1092 return head_skb;
1093
f600b690
JF
1094err_xdp:
1095 rcu_read_unlock();
d46eeeaf 1096 stats->xdp_drops++;
8fc3b9e9
MT
1097err_skb:
1098 put_page(page);
850e088d 1099 while (num_buf-- > 1) {
680557cf
MT
1100 buf = virtqueue_get_buf(rq->vq, &len);
1101 if (unlikely(!buf)) {
8fc3b9e9
MT
1102 pr_debug("%s: rx error: %d buffers missing\n",
1103 dev->name, num_buf);
1104 dev->stats.rx_length_errors++;
1105 break;
1106 }
d46eeeaf 1107 stats->bytes += len;
680557cf 1108 page = virt_to_head_page(buf);
8fc3b9e9 1109 put_page(page);
9ab86bbc 1110 }
8fc3b9e9 1111err_buf:
d46eeeaf 1112 stats->drops++;
8fc3b9e9 1113 dev_kfree_skb(head_skb);
56434a01 1114xdp_xmit:
8fc3b9e9 1115 return NULL;
9ab86bbc
SM
1116}
1117
7d9d60fd
TM
1118static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
1119 void *buf, unsigned int len, void **ctx,
a0929a44 1120 unsigned int *xdp_xmit,
d46eeeaf 1121 struct virtnet_rq_stats *stats)
9ab86bbc 1122{
e9d7417b 1123 struct net_device *dev = vi->dev;
9ab86bbc 1124 struct sk_buff *skb;
012873d0 1125 struct virtio_net_hdr_mrg_rxbuf *hdr;
3f2c31d9 1126
bcff3162 1127 if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
9ab86bbc
SM
1128 pr_debug("%s: short packet %i\n", dev->name, len);
1129 dev->stats.rx_length_errors++;
ab7db917 1130 if (vi->mergeable_rx_bufs) {
680557cf 1131 put_page(virt_to_head_page(buf));
ab7db917 1132 } else if (vi->big_packets) {
98bfd23c 1133 give_pages(rq, buf);
ab7db917 1134 } else {
f6b10209 1135 put_page(virt_to_head_page(buf));
ab7db917 1136 }
7d9d60fd 1137 return;
9ab86bbc 1138 }
3f2c31d9 1139
f121159d 1140 if (vi->mergeable_rx_bufs)
7d9d60fd 1141 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
a0929a44 1142 stats);
f121159d 1143 else if (vi->big_packets)
a0929a44 1144 skb = receive_big(dev, vi, rq, buf, len, stats);
f121159d 1145 else
a0929a44 1146 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
f121159d
MT
1147
1148 if (unlikely(!skb))
7d9d60fd 1149 return;
3f2c31d9 1150
9ab86bbc 1151 hdr = skb_vnet_hdr(skb);
3fa2a1df 1152
e858fae2 1153 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
10a8d94a 1154 skb->ip_summed = CHECKSUM_UNNECESSARY;
296f96fc 1155
e858fae2
MR
1156 if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
1157 virtio_is_little_endian(vi->vdev))) {
1158 net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
1159 dev->name, hdr->hdr.gso_type,
1160 hdr->hdr.gso_size);
1161 goto frame_err;
296f96fc
RR
1162 }
1163
133bbb18 1164 skb_record_rx_queue(skb, vq2rxq(rq->vq));
d1dc06dc
MR
1165 skb->protocol = eth_type_trans(skb, dev);
1166 pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
1167 ntohs(skb->protocol), skb->len, skb->pkt_type);
1168
0fbd050a 1169 napi_gro_receive(&rq->napi, skb);
7d9d60fd 1170 return;
296f96fc
RR
1171
1172frame_err:
1173 dev->stats.rx_frame_errors++;
296f96fc
RR
1174 dev_kfree_skb(skb);
1175}
1176
192f68cf
JW
1177/* Unlike mergeable buffers, all buffers are allocated to the
1178 * same size, except for the headroom. For this reason we do
1179 * not need to use mergeable_len_to_ctx here - it is enough
1180 * to store the headroom as the context ignoring the truesize.
1181 */
946fa564
MT
1182static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
1183 gfp_t gfp)
296f96fc 1184{
f6b10209
JW
1185 struct page_frag *alloc_frag = &rq->alloc_frag;
1186 char *buf;
2de2f7f4 1187 unsigned int xdp_headroom = virtnet_get_headroom(vi);
192f68cf 1188 void *ctx = (void *)(unsigned long)xdp_headroom;
f6b10209 1189 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
9ab86bbc 1190 int err;
3f2c31d9 1191
f6b10209
JW
1192 len = SKB_DATA_ALIGN(len) +
1193 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1194 if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
9ab86bbc 1195 return -ENOMEM;
296f96fc 1196
f6b10209
JW
1197 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
1198 get_page(alloc_frag->page);
1199 alloc_frag->offset += len;
1200 sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
1201 vi->hdr_len + GOOD_PACKET_LEN);
192f68cf 1202 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
9ab86bbc 1203 if (err < 0)
f6b10209 1204 put_page(virt_to_head_page(buf));
9ab86bbc
SM
1205 return err;
1206}
97402b96 1207
012873d0
MT
1208static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
1209 gfp_t gfp)
9ab86bbc 1210{
9ab86bbc
SM
1211 struct page *first, *list = NULL;
1212 char *p;
1213 int i, err, offset;
1214
a5835440
RR
1215 sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
1216
e9d7417b 1217 /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
9ab86bbc 1218 for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
e9d7417b 1219 first = get_a_page(rq, gfp);
9ab86bbc
SM
1220 if (!first) {
1221 if (list)
e9d7417b 1222 give_pages(rq, list);
9ab86bbc 1223 return -ENOMEM;
97402b96 1224 }
e9d7417b 1225 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
97402b96 1226
9ab86bbc
SM
1227 /* chain new page in list head to match sg */
1228 first->private = (unsigned long)list;
1229 list = first;
1230 }
296f96fc 1231
e9d7417b 1232 first = get_a_page(rq, gfp);
9ab86bbc 1233 if (!first) {
e9d7417b 1234 give_pages(rq, list);
9ab86bbc
SM
1235 return -ENOMEM;
1236 }
1237 p = page_address(first);
1238
e9d7417b 1239 /* rq->sg[0], rq->sg[1] share the same page */
012873d0
MT
1240 /* a separated rq->sg[0] for header - required in case !any_header_sg */
1241 sg_set_buf(&rq->sg[0], p, vi->hdr_len);
9ab86bbc 1242
e9d7417b 1243 /* rq->sg[1] for data packet, from offset */
9ab86bbc 1244 offset = sizeof(struct padded_vnet_hdr);
e9d7417b 1245 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
9ab86bbc
SM
1246
1247 /* chain first in list head */
1248 first->private = (unsigned long)list;
9dc7b9e4
RR
1249 err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
1250 first, gfp);
9ab86bbc 1251 if (err < 0)
e9d7417b 1252 give_pages(rq, first);
9ab86bbc
SM
1253
1254 return err;
296f96fc
RR
1255}
1256
d85b758f 1257static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
3cc81a9a
JW
1258 struct ewma_pkt_len *avg_pkt_len,
1259 unsigned int room)
3f2c31d9 1260{
ab7db917 1261 const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
fbf28d78
MD
1262 unsigned int len;
1263
3cc81a9a
JW
1264 if (room)
1265 return PAGE_SIZE - room;
1266
1267 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
f0c3192c 1268 rq->min_buf_len, PAGE_SIZE - hdr_len);
3cc81a9a 1269
e377fcc8 1270 return ALIGN(len, L1_CACHE_BYTES);
fbf28d78
MD
1271}
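/*
 * Worked example (illustrative, assuming 4K pages, 64 byte cache lines, a
 * 12 byte mergeable header and no XDP headroom): with an EWMA packet length
 * of 1500 the clamp gives len = 12 + 1500 = 1512, which ALIGN() rounds up
 * to 1536.  With XDP headroom in use (room != 0) the estimate is bypassed
 * and every buffer simply gets PAGE_SIZE - room bytes.
 */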
1272
2de2f7f4
JF
1273static int add_recvbuf_mergeable(struct virtnet_info *vi,
1274 struct receive_queue *rq, gfp_t gfp)
fbf28d78 1275{
fb51879d 1276 struct page_frag *alloc_frag = &rq->alloc_frag;
2de2f7f4 1277 unsigned int headroom = virtnet_get_headroom(vi);
3cc81a9a
JW
1278 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1279 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
fb51879d 1280 char *buf;
680557cf 1281 void *ctx;
3f2c31d9 1282 int err;
fb51879d 1283 unsigned int len, hole;
3f2c31d9 1284
3cc81a9a
JW
1285 /* Extra tailroom is needed to satisfy XDP's assumption. This
1286 * means rx frags coalescing won't work, but consider we've
1287 * disabled GSO for XDP, it won't be a big issue.
1288 */
1289 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
1290 if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
9ab86bbc 1291 return -ENOMEM;
ab7db917 1292
fb51879d 1293 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
2de2f7f4 1294 buf += headroom; /* advance address leaving hole at front of pkt */
fb51879d 1295 get_page(alloc_frag->page);
3cc81a9a 1296 alloc_frag->offset += len + room;
fb51879d 1297 hole = alloc_frag->size - alloc_frag->offset;
3cc81a9a 1298 if (hole < len + room) {
ab7db917
MD
1299 /* To avoid internal fragmentation, if there is very likely not
1300 * enough space for another buffer, add the remaining space to
1daa8790 1301 * the current buffer.
ab7db917 1302 */
fb51879d
MD
1303 len += hole;
1304 alloc_frag->offset += hole;
1305 }
3f2c31d9 1306
fb51879d 1307 sg_init_one(rq->sg, buf, len);
29fda25a 1308 ctx = mergeable_len_to_ctx(len, headroom);
680557cf 1309 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
9ab86bbc 1310 if (err < 0)
2613af0e 1311 put_page(virt_to_head_page(buf));
3f2c31d9 1312
9ab86bbc
SM
1313 return err;
1314}
3f2c31d9 1315
b2baed69
RR
1316/*
1317 * Returns false if we couldn't fill entirely (OOM).
1318 *
1319 * Normally run in the receive path, but can also be run from ndo_open
1320 * before we're receiving packets, or from refill_work which is
1321 * careful to disable receiving (using napi_disable).
1322 */
946fa564
MT
1323static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
1324 gfp_t gfp)
9ab86bbc
SM
1325{
1326 int err;
1788f495 1327 bool oom;
3f2c31d9 1328
9ab86bbc
SM
1329 do {
1330 if (vi->mergeable_rx_bufs)
2de2f7f4 1331 err = add_recvbuf_mergeable(vi, rq, gfp);
9ab86bbc 1332 else if (vi->big_packets)
012873d0 1333 err = add_recvbuf_big(vi, rq, gfp);
9ab86bbc 1334 else
946fa564 1335 err = add_recvbuf_small(vi, rq, gfp);
3f2c31d9 1336
1788f495 1337 oom = err == -ENOMEM;
9ed4cb07 1338 if (err)
3f2c31d9 1339 break;
b7dfde95 1340 } while (rq->vq->num_free);
461f03dc 1341 if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
01c32598
MT
1342 unsigned long flags;
1343
1344 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
d46eeeaf 1345 rq->stats.kicks++;
01c32598 1346 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
461f03dc
TM
1347 }
1348
3161e453 1349 return !oom;
3f2c31d9
MM
1350}
1351
18445c4d 1352static void skb_recv_done(struct virtqueue *rvq)
296f96fc
RR
1353{
1354 struct virtnet_info *vi = rvq->vdev->priv;
986a4f4d 1355 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
e9d7417b 1356
e4e8452a 1357 virtqueue_napi_schedule(&rq->napi, rvq);
296f96fc
RR
1358}
1359
e4e8452a 1360static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
3e9d08ec 1361{
e4e8452a 1362 napi_enable(napi);
3e9d08ec
BR
1363
1364 /* If all buffers were filled by other side before we napi_enabled, we
e4e8452a
WB
1365 * won't get another interrupt, so process any outstanding packets now.
 1366 * Call local_bh_enable afterwards to trigger softIRQ processing.
1367 */
1368 local_bh_disable();
1369 virtqueue_napi_schedule(napi, vq);
1370 local_bh_enable();
3e9d08ec
BR
1371}
1372
b92f1e67
WB
1373static void virtnet_napi_tx_enable(struct virtnet_info *vi,
1374 struct virtqueue *vq,
1375 struct napi_struct *napi)
1376{
1377 if (!napi->weight)
1378 return;
1379
1380 /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
1381 * enable the feature if this is likely affine with the transmit path.
1382 */
1383 if (!vi->affinity_hint_set) {
1384 napi->weight = 0;
1385 return;
1386 }
1387
1388 return virtnet_napi_enable(vq, napi);
1389}
1390
78a57b48
WB
1391static void virtnet_napi_tx_disable(struct napi_struct *napi)
1392{
1393 if (napi->weight)
1394 napi_disable(napi);
1395}
1396
3161e453
RR
1397static void refill_work(struct work_struct *work)
1398{
e9d7417b
JW
1399 struct virtnet_info *vi =
1400 container_of(work, struct virtnet_info, refill.work);
3161e453 1401 bool still_empty;
986a4f4d
JW
1402 int i;
1403
55257d72 1404 for (i = 0; i < vi->curr_queue_pairs; i++) {
986a4f4d 1405 struct receive_queue *rq = &vi->rq[i];
3161e453 1406
986a4f4d 1407 napi_disable(&rq->napi);
946fa564 1408 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
e4e8452a 1409 virtnet_napi_enable(rq->vq, &rq->napi);
3161e453 1410
986a4f4d
JW
 1411 /* In theory, this can happen: if we don't get any buffers in,
1412 * we will *never* try to fill again.
1413 */
1414 if (still_empty)
1415 schedule_delayed_work(&vi->refill, HZ/2);
1416 }
3161e453
RR
1417}
1418
2471c75e
JDB
1419static int virtnet_receive(struct receive_queue *rq, int budget,
1420 unsigned int *xdp_xmit)
296f96fc 1421{
e9d7417b 1422 struct virtnet_info *vi = rq->vq->vdev->priv;
d46eeeaf 1423 struct virtnet_rq_stats stats = {};
a0929a44 1424 unsigned int len;
9ab86bbc 1425 void *buf;
a0929a44 1426 int i;
296f96fc 1427
192f68cf 1428 if (!vi->big_packets || vi->mergeable_rx_bufs) {
680557cf
MT
1429 void *ctx;
1430
d46eeeaf 1431 while (stats.packets < budget &&
680557cf 1432 (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
a0929a44 1433 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
d46eeeaf 1434 stats.packets++;
680557cf
MT
1435 }
1436 } else {
d46eeeaf 1437 while (stats.packets < budget &&
680557cf 1438 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
a0929a44 1439 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
d46eeeaf 1440 stats.packets++;
680557cf 1441 }
296f96fc
RR
1442 }
1443
718be6ba 1444 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
946fa564 1445 if (!try_fill_recv(vi, rq, GFP_ATOMIC))
3b07e9ca 1446 schedule_delayed_work(&vi->refill, 0);
3161e453 1447 }
296f96fc 1448
d7dfc5cf 1449 u64_stats_update_begin(&rq->stats.syncp);
a0929a44
TM
1450 for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
1451 size_t offset = virtnet_rq_stats_desc[i].offset;
1452 u64 *item;
1453
d46eeeaf
JW
1454 item = (u64 *)((u8 *)&rq->stats + offset);
1455 *item += *(u64 *)((u8 *)&stats + offset);
a0929a44 1456 }
d7dfc5cf 1457 u64_stats_update_end(&rq->stats.syncp);
61845d20 1458
d46eeeaf 1459 return stats.packets;
2ffa7598
JW
1460}
1461
df133f3f 1462static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
ea7735d9 1463{
ea7735d9 1464 unsigned int len;
ea7735d9
WB
1465 unsigned int packets = 0;
1466 unsigned int bytes = 0;
5050471d 1467 void *ptr;
ea7735d9 1468
5050471d
TM
1469 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
1470 if (likely(!is_xdp_frame(ptr))) {
1471 struct sk_buff *skb = ptr;
ea7735d9 1472
5050471d 1473 pr_debug("Sent skb %p\n", skb);
ea7735d9 1474
5050471d
TM
1475 bytes += skb->len;
1476 napi_consume_skb(skb, in_napi);
1477 } else {
1478 struct xdp_frame *frame = ptr_to_xdp(ptr);
ea7735d9 1479
5050471d
TM
1480 bytes += frame->len;
1481 xdp_return_frame(frame);
1482 }
1483 packets++;
ea7735d9
WB
1484 }
1485
1486 /* Avoid overhead when no packets have been processed
1487 * happens when called speculatively from start_xmit.
1488 */
1489 if (!packets)
1490 return;
1491
d7dfc5cf
TM
1492 u64_stats_update_begin(&sq->stats.syncp);
1493 sq->stats.bytes += bytes;
1494 sq->stats.packets += packets;
1495 u64_stats_update_end(&sq->stats.syncp);
ea7735d9
WB
1496}
1497
534da5e8
TM
1498static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
1499{
1500 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
1501 return false;
1502 else if (q < vi->curr_queue_pairs)
1503 return true;
1504 else
1505 return false;
1506}
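/*
 * Example of the split this helper encodes (illustrative numbers): with
 * curr_queue_pairs = 8 and xdp_queue_pairs = 4, queues 0-3 carry ordinary
 * skbs while queues 4-7 are XDP-only "raw buffer" queues whose tx
 * completions are xdp_frames rather than sk_buffs, which is why the tx
 * cleanup paths check is_xdp_raw_buffer_queue() before touching them.
 */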
1507
7b0411ef
WB
1508static void virtnet_poll_cleantx(struct receive_queue *rq)
1509{
1510 struct virtnet_info *vi = rq->vq->vdev->priv;
1511 unsigned int index = vq2rxq(rq->vq);
1512 struct send_queue *sq = &vi->sq[index];
1513 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
1514
534da5e8 1515 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
7b0411ef
WB
1516 return;
1517
1518 if (__netif_tx_trylock(txq)) {
df133f3f 1519 free_old_xmit_skbs(sq, true);
7b0411ef
WB
1520 __netif_tx_unlock(txq);
1521 }
1522
1523 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
1524 netif_tx_wake_queue(txq);
1525}
1526
2ffa7598
JW
1527static int virtnet_poll(struct napi_struct *napi, int budget)
1528{
1529 struct receive_queue *rq =
1530 container_of(napi, struct receive_queue, napi);
9267c430
JW
1531 struct virtnet_info *vi = rq->vq->vdev->priv;
1532 struct send_queue *sq;
2a43565c 1533 unsigned int received;
2471c75e 1534 unsigned int xdp_xmit = 0;
2ffa7598 1535
7b0411ef
WB
1536 virtnet_poll_cleantx(rq);
1537
186b3c99 1538 received = virtnet_receive(rq, budget, &xdp_xmit);
2ffa7598 1539
8329d98e 1540 /* Out of packets? */
e4e8452a
WB
1541 if (received < budget)
1542 virtqueue_napi_complete(napi, rq->vq, received);
296f96fc 1543
2471c75e 1544 if (xdp_xmit & VIRTIO_XDP_REDIR)
1d233886 1545 xdp_do_flush();
2471c75e
JDB
1546
1547 if (xdp_xmit & VIRTIO_XDP_TX) {
97c2c69e 1548 sq = virtnet_xdp_get_sq(vi);
461f03dc
TM
1549 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
1550 u64_stats_update_begin(&sq->stats.syncp);
1551 sq->stats.kicks++;
1552 u64_stats_update_end(&sq->stats.syncp);
1553 }
97c2c69e 1554 virtnet_xdp_put_sq(vi, sq);
9267c430 1555 }
186b3c99 1556
296f96fc
RR
1557 return received;
1558}
1559
986a4f4d
JW
1560static int virtnet_open(struct net_device *dev)
1561{
1562 struct virtnet_info *vi = netdev_priv(dev);
754b8a21 1563 int i, err;
986a4f4d 1564
e4166625
JW
1565 for (i = 0; i < vi->max_queue_pairs; i++) {
1566 if (i < vi->curr_queue_pairs)
1567 /* Make sure we have some buffers: if oom use wq. */
946fa564 1568 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
e4166625 1569 schedule_delayed_work(&vi->refill, 0);
754b8a21 1570
b02e5a0e 1571 err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id);
754b8a21
JDB
1572 if (err < 0)
1573 return err;
1574
8d5d8852
JDB
1575 err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
1576 MEM_TYPE_PAGE_SHARED, NULL);
1577 if (err < 0) {
1578 xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
1579 return err;
1580 }
1581
e4e8452a 1582 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
b92f1e67 1583 virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
986a4f4d
JW
1584 }
1585
1586 return 0;
1587}
1588
b92f1e67
WB
1589static int virtnet_poll_tx(struct napi_struct *napi, int budget)
1590{
1591 struct send_queue *sq = container_of(napi, struct send_queue, napi);
1592 struct virtnet_info *vi = sq->vq->vdev->priv;
534da5e8
TM
1593 unsigned int index = vq2txq(sq->vq);
1594 struct netdev_queue *txq;
b92f1e67 1595
534da5e8
TM
1596 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
1597 /* We don't need to enable cb for XDP */
1598 napi_complete_done(napi, 0);
1599 return 0;
1600 }
1601
1602 txq = netdev_get_tx_queue(vi->dev, index);
b92f1e67 1603 __netif_tx_lock(txq, raw_smp_processor_id());
df133f3f 1604 free_old_xmit_skbs(sq, true);
b92f1e67
WB
1605 __netif_tx_unlock(txq);
1606
1607 virtqueue_napi_complete(napi, sq->vq, 0);
1608
1609 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
1610 netif_tx_wake_queue(txq);
1611
1612 return 0;
1613}
1614
e9d7417b 1615static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
296f96fc 1616{
012873d0 1617 struct virtio_net_hdr_mrg_rxbuf *hdr;
296f96fc 1618 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
e9d7417b 1619 struct virtnet_info *vi = sq->vq->vdev->priv;
e2fcad58 1620 int num_sg;
012873d0 1621 unsigned hdr_len = vi->hdr_len;
e7428e95 1622 bool can_push;
296f96fc 1623
e174961c 1624 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
e7428e95
MT
1625
1626 can_push = vi->any_header_sg &&
1627 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
1628 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
1629 /* Even if we can, don't push here yet as this would skew
1630 * csum_start offset below. */
1631 if (can_push)
012873d0 1632 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
e7428e95
MT
1633 else
1634 hdr = skb_vnet_hdr(skb);
296f96fc 1635
e858fae2 1636 if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
fd3a8862
WB
1637 virtio_is_little_endian(vi->vdev), false,
1638 0))
85eb1389 1639 return -EPROTO;
296f96fc 1640
3f2c31d9 1641 if (vi->mergeable_rx_bufs)
012873d0 1642 hdr->num_buffers = 0;
3f2c31d9 1643
547c890c 1644 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
e7428e95
MT
1645 if (can_push) {
1646 __skb_push(skb, hdr_len);
1647 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
e2fcad58
JD
1648 if (unlikely(num_sg < 0))
1649 return num_sg;
e7428e95
MT
1650 /* Pull header back to avoid skew in tx bytes calculations. */
1651 __skb_pull(skb, hdr_len);
1652 } else {
1653 sg_set_buf(sq->sg, hdr, hdr_len);
e2fcad58
JD
1654 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
1655 if (unlikely(num_sg < 0))
1656 return num_sg;
1657 num_sg++;
e7428e95 1658 }
9dc7b9e4 1659 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
11a3a154
RR
1660}
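/*
 * Sketch of the two scatterlist layouts xmit_skb() builds (illustrative,
 * not extra driver code):
 *
 *   can_push:   sg[0] = [ vnet hdr | linear data ], sg[1..] = page frags
 *               (header pushed into the skb headroom, one sg entry saved)
 *   otherwise:  sg[0] = [ vnet hdr from skb->cb ], sg[1] = linear data,
 *               sg[2..] = page frags
 *
 * which is why sg_init_table() above sizes the table with
 * nr_frags + (can_push ? 1 : 2) entries.
 */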
1661
424efe9c 1662static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
99ffc696
RR
1663{
1664 struct virtnet_info *vi = netdev_priv(dev);
986a4f4d
JW
1665 int qnum = skb_get_queue_mapping(skb);
1666 struct send_queue *sq = &vi->sq[qnum];
9ed4cb07 1667 int err;
4b7fd2e6 1668 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
6b16f9ee 1669 bool kick = !netdev_xmit_more();
b92f1e67 1670 bool use_napi = sq->napi.weight;
2cb9c6ba 1671
2cb9c6ba 1672 /* Free up any pending old buffers before queueing new ones. */
df133f3f 1673 free_old_xmit_skbs(sq, false);
99ffc696 1674
bdb12e0d
WB
1675 if (use_napi && kick)
1676 virtqueue_enable_cb_delayed(sq->vq);
1677
074c3582
JK
1678 /* timestamp packet in software */
1679 skb_tx_timestamp(skb);
1680
03f191ba 1681 /* Try to transmit */
b7dfde95 1682 err = xmit_skb(sq, skb);
48925e37 1683
9ed4cb07 1684 /* This should not happen! */
681daee2 1685 if (unlikely(err)) {
9ed4cb07
RR
1686 dev->stats.tx_fifo_errors++;
1687 if (net_ratelimit())
1688 dev_warn(&dev->dev,
7934b481
YS
1689 "Unexpected TXQ (%d) queue failure: %d\n",
1690 qnum, err);
58eba97d 1691 dev->stats.tx_dropped++;
85e94525 1692 dev_kfree_skb_any(skb);
58eba97d 1693 return NETDEV_TX_OK;
296f96fc 1694 }
03f191ba 1695
48925e37 1696 /* Don't wait up for transmitted skbs to be freed. */
b92f1e67
WB
1697 if (!use_napi) {
1698 skb_orphan(skb);
895b5c9f 1699 nf_reset_ct(skb);
b92f1e67 1700 }
48925e37 1701
60302ff6
MT
1702 /* If running out of space, stop queue to avoid getting packets that we
1703 * are then unable to transmit.
1704 * An alternative would be to force queuing layer to requeue the skb by
1705 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
1706 * returned in a normal path of operation: it means that driver is not
1707 * maintaining the TX queue stop/start state properly, and causes
1708 * the stack to do a non-trivial amount of useless work.
1709 * Since most packets only take 1 or 2 ring slots, stopping the queue
1710 * early means 16 slots are typically wasted.
d631b94e 1711 */
b7dfde95 1712 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
986a4f4d 1713 netif_stop_subqueue(dev, qnum);
b92f1e67
WB
1714 if (!use_napi &&
1715 unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
48925e37 1716 /* More just got used, free them then recheck. */
df133f3f 1717 free_old_xmit_skbs(sq, false);
b7dfde95 1718 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
986a4f4d 1719 netif_start_subqueue(dev, qnum);
e9d7417b 1720 virtqueue_disable_cb(sq->vq);
48925e37
RR
1721 }
1722 }
99ffc696 1723 }
48925e37 1724
461f03dc
TM
1725 if (kick || netif_xmit_stopped(txq)) {
1726 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
1727 u64_stats_update_begin(&sq->stats.syncp);
1728 sq->stats.kicks++;
1729 u64_stats_update_end(&sq->stats.syncp);
1730 }
1731 }
296f96fc 1732
0b725a2c 1733 return NETDEV_TX_OK;
c223a078
DM
1734}
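The stop/wake watermark used above is 2 + MAX_SKB_FRAGS descriptors: in the worst (non-push) case an skb needs one descriptor for the header, one for the linear data, and one per page fragment. A standalone sketch of the watermark, assuming the common MAX_SKB_FRAGS value of 17:

#include <stdio.h>

#define MAX_SKB_FRAGS 17	/* typical value with 4K pages; an assumption here */

int main(void)
{
	int ring = 256;			/* example tx vring size */
	int watermark = 2 + MAX_SKB_FRAGS;

	for (int num_free = 22; num_free >= 17; num_free--)
		printf("num_free=%2d -> %s\n", num_free,
		       num_free < watermark ? "stop queue" : "keep transmitting");

	printf("queue is woken again once num_free >= %d of %d slots\n", watermark, ring);
	return 0;
}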
1735
40cbfc37
AK
1736/*
1737 * Send command via the control virtqueue and check status. Commands
1738 * supported by the hypervisor, as indicated by feature bits, should
788a8b6d 1739 * never fail unless improperly formatted.
40cbfc37
AK
1740 */
1741static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
d24bae32 1742 struct scatterlist *out)
40cbfc37 1743{
f7bc9594 1744 struct scatterlist *sgs[4], hdr, stat;
d24bae32 1745 unsigned out_num = 0, tmp;
40cbfc37
AK
1746
1747 /* Caller should know better */
f7bc9594 1748 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
40cbfc37 1749
12e57169
MT
1750 vi->ctrl->status = ~0;
1751 vi->ctrl->hdr.class = class;
1752 vi->ctrl->hdr.cmd = cmd;
f7bc9594 1753 /* Add header */
12e57169 1754 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
f7bc9594 1755 sgs[out_num++] = &hdr;
40cbfc37 1756
f7bc9594
RR
1757 if (out)
1758 sgs[out_num++] = out;
40cbfc37 1759
f7bc9594 1760 /* Add return status. */
12e57169 1761 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
d24bae32 1762 sgs[out_num] = &stat;
40cbfc37 1763
d24bae32 1764 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
a7c58146 1765 virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
40cbfc37 1766
67975901 1767 if (unlikely(!virtqueue_kick(vi->cvq)))
12e57169 1768 return vi->ctrl->status == VIRTIO_NET_OK;
40cbfc37
AK
1769
1770 /* Spin for a response, the kick causes an ioport write, trapping
1771 * into the hypervisor, so the request should be handled immediately.
1772 */
047b9b94
HG
1773 while (!virtqueue_get_buf(vi->cvq, &tmp) &&
1774 !virtqueue_is_broken(vi->cvq))
40cbfc37
AK
1775 cpu_relax();
1776
12e57169 1777 return vi->ctrl->status == VIRTIO_NET_OK;
40cbfc37
AK
1778}
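For reference, the control-virtqueue message assembled above is just three scatter-gather regions: a two-byte class/command header, optional command-specific data, and a one-byte status that the device writes back. A hedged userspace mock of that layout (field sizes and the OK/ERR values follow the virtio spec; the class value 4 is only an example):

#include <stdint.h>
#include <stdio.h>

struct virtio_net_ctrl_hdr { uint8_t class; uint8_t cmd; };

#define VIRTIO_NET_OK  0
#define VIRTIO_NET_ERR 1

int main(void)
{
	struct virtio_net_ctrl_hdr hdr = { .class = 4 /* e.g. MQ */, .cmd = 0 };
	uint16_t data = 4;	/* command-specific payload, e.g. requested queue pairs */
	uint8_t status = 0xff;	/* driver primes this; device overwrites with OK/ERR */

	/* sgs[0] = &hdr (driver->device), sgs[1] = &data (driver->device),
	 * last sg = &status (device->driver) */
	printf("driver->device: %zu + %zu bytes; device->driver: %zu byte status (0x%02x before completion)\n",
	       sizeof(hdr), sizeof(data), sizeof(status), status);
	return 0;
}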
1779
9c46f6d4
AW
1780static int virtnet_set_mac_address(struct net_device *dev, void *p)
1781{
1782 struct virtnet_info *vi = netdev_priv(dev);
1783 struct virtio_device *vdev = vi->vdev;
f2f2c8b4 1784 int ret;
e37e2ff3 1785 struct sockaddr *addr;
7e58d5ae 1786 struct scatterlist sg;
9c46f6d4 1787
ba5e4426
SS
1788 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
1789 return -EOPNOTSUPP;
1790
801822d1 1791 addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
e37e2ff3
AL
1792 if (!addr)
1793 return -ENOMEM;
e37e2ff3
AL
1794
1795 ret = eth_prepare_mac_addr_change(dev, addr);
f2f2c8b4 1796 if (ret)
e37e2ff3 1797 goto out;
9c46f6d4 1798
7e58d5ae
AK
1799 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
1800 sg_init_one(&sg, addr->sa_data, dev->addr_len);
1801 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
d24bae32 1802 VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
7e58d5ae
AK
1803 dev_warn(&vdev->dev,
1804 "Failed to set mac address by vq command.\n");
e37e2ff3
AL
1805 ret = -EINVAL;
1806 goto out;
7e58d5ae 1807 }
7e93a02f
MT
1808 } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
1809 !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
855e0c52
RR
1810 unsigned int i;
1811
1812 /* Naturally, this has an atomicity problem. */
1813 for (i = 0; i < dev->addr_len; i++)
1814 virtio_cwrite8(vdev,
1815 offsetof(struct virtio_net_config, mac) +
1816 i, addr->sa_data[i]);
7e58d5ae
AK
1817 }
1818
1819 eth_commit_mac_addr_change(dev, p);
e37e2ff3 1820 ret = 0;
9c46f6d4 1821
e37e2ff3
AL
1822out:
1823 kfree(addr);
1824 return ret;
9c46f6d4
AW
1825}
1826
bc1f4470 1827static void virtnet_stats(struct net_device *dev,
1828 struct rtnl_link_stats64 *tot)
3fa2a1df 1829{
1830 struct virtnet_info *vi = netdev_priv(dev);
3fa2a1df 1831 unsigned int start;
d7dfc5cf 1832 int i;
3fa2a1df 1833
d7dfc5cf 1834 for (i = 0; i < vi->max_queue_pairs; i++) {
2c4a2f7d 1835 u64 tpackets, tbytes, rpackets, rbytes, rdrops;
d7dfc5cf
TM
1836 struct receive_queue *rq = &vi->rq[i];
1837 struct send_queue *sq = &vi->sq[i];
3fa2a1df 1838
1839 do {
d7dfc5cf
TM
1840 start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
1841 tpackets = sq->stats.packets;
1842 tbytes = sq->stats.bytes;
1843 } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
83a27052
ED
1844
1845 do {
d7dfc5cf 1846 start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
d46eeeaf
JW
1847 rpackets = rq->stats.packets;
1848 rbytes = rq->stats.bytes;
1849 rdrops = rq->stats.drops;
d7dfc5cf 1850 } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
3fa2a1df 1851
1852 tot->rx_packets += rpackets;
1853 tot->tx_packets += tpackets;
1854 tot->rx_bytes += rbytes;
1855 tot->tx_bytes += tbytes;
2c4a2f7d 1856 tot->rx_dropped += rdrops;
3fa2a1df 1857 }
1858
1859 tot->tx_dropped = dev->stats.tx_dropped;
021ac8d3 1860 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
3fa2a1df 1861 tot->rx_length_errors = dev->stats.rx_length_errors;
1862 tot->rx_frame_errors = dev->stats.rx_frame_errors;
3fa2a1df 1863}
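The fetch_begin/fetch_retry loops above are a seqcount-style read: the counters are re-read if a writer was active (odd sequence) or bumped the sequence meanwhile. An illustrative userspace sketch of the same retry pattern (single-threaded and without the memory barriers the kernel's u64_stats_sync provides):

#include <stdint.h>
#include <stdio.h>

static unsigned int seq;	/* made odd while a writer is updating */
static uint64_t packets, bytes;

static unsigned int read_begin(void) { return seq; }
static int read_retry(unsigned int start) { return (start & 1) || seq != start; }

int main(void)
{
	uint64_t p, b;
	unsigned int start;

	do {
		start = read_begin();
		p = packets;	/* snapshot all counters inside the loop */
		b = bytes;
	} while (read_retry(start));

	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}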
1864
586d17c5
JW
1865static void virtnet_ack_link_announce(struct virtnet_info *vi)
1866{
1867 rtnl_lock();
1868 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
d24bae32 1869 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
586d17c5
JW
1870 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
1871 rtnl_unlock();
1872}
1873
47315329 1874static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
986a4f4d
JW
1875{
1876 struct scatterlist sg;
986a4f4d
JW
1877 struct net_device *dev = vi->dev;
1878
1879 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
1880 return 0;
1881
12e57169
MT
1882 vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
1883 sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
986a4f4d
JW
1884
1885 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
d24bae32 1886 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
986a4f4d
JW
 1887 dev_warn(&dev->dev, "Failed to set number of queue pairs to %d\n",
1888 queue_pairs);
1889 return -EINVAL;
55257d72 1890 } else {
986a4f4d 1891 vi->curr_queue_pairs = queue_pairs;
35ed159b
JW
 1892 /* virtnet_open() will refill when the device goes up. */
1893 if (dev->flags & IFF_UP)
1894 schedule_delayed_work(&vi->refill, 0);
55257d72 1895 }
986a4f4d
JW
1896
1897 return 0;
1898}
1899
47315329
JF
1900static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1901{
1902 int err;
1903
1904 rtnl_lock();
1905 err = _virtnet_set_queues(vi, queue_pairs);
1906 rtnl_unlock();
1907 return err;
1908}
1909
296f96fc
RR
1910static int virtnet_close(struct net_device *dev)
1911{
1912 struct virtnet_info *vi = netdev_priv(dev);
986a4f4d 1913 int i;
296f96fc 1914
b2baed69
RR
1915 /* Make sure refill_work doesn't re-enable napi! */
1916 cancel_delayed_work_sync(&vi->refill);
986a4f4d 1917
b92f1e67 1918 for (i = 0; i < vi->max_queue_pairs; i++) {
754b8a21 1919 xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
986a4f4d 1920 napi_disable(&vi->rq[i].napi);
78a57b48 1921 virtnet_napi_tx_disable(&vi->sq[i].napi);
b92f1e67 1922 }
296f96fc 1923
296f96fc
RR
1924 return 0;
1925}
1926
2af7698e
AW
1927static void virtnet_set_rx_mode(struct net_device *dev)
1928{
1929 struct virtnet_info *vi = netdev_priv(dev);
f565a7c2 1930 struct scatterlist sg[2];
f565a7c2 1931 struct virtio_net_ctrl_mac *mac_data;
ccffad25 1932 struct netdev_hw_addr *ha;
32e7bfc4 1933 int uc_count;
4cd24eaf 1934 int mc_count;
f565a7c2
AW
1935 void *buf;
1936 int i;
2af7698e 1937
788a8b6d 1938 /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
2af7698e
AW
1939 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
1940 return;
1941
12e57169
MT
1942 vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
1943 vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
2af7698e 1944
12e57169 1945 sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
2af7698e
AW
1946
1947 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
d24bae32 1948 VIRTIO_NET_CTRL_RX_PROMISC, sg))
2af7698e 1949 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
12e57169 1950 vi->ctrl->promisc ? "en" : "dis");
2af7698e 1951
12e57169 1952 sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
2af7698e
AW
1953
1954 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
d24bae32 1955 VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
2af7698e 1956 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
12e57169 1957 vi->ctrl->allmulti ? "en" : "dis");
f565a7c2 1958
32e7bfc4 1959 uc_count = netdev_uc_count(dev);
4cd24eaf 1960 mc_count = netdev_mc_count(dev);
f565a7c2 1961 /* MAC filter - use one buffer for both lists */
4cd24eaf
JP
1962 buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
1963 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
1964 mac_data = buf;
e68ed8f0 1965 if (!buf)
f565a7c2 1966 return;
f565a7c2 1967
23e258e1
AW
1968 sg_init_table(sg, 2);
1969
f565a7c2 1970 /* Store the unicast list and count in the front of the buffer */
fdd819b2 1971 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
ccffad25 1972 i = 0;
32e7bfc4 1973 netdev_for_each_uc_addr(ha, dev)
ccffad25 1974 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
f565a7c2
AW
1975
1976 sg_set_buf(&sg[0], mac_data,
32e7bfc4 1977 sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
f565a7c2
AW
1978
1979 /* multicast list and count fill the end */
32e7bfc4 1980 mac_data = (void *)&mac_data->macs[uc_count][0];
f565a7c2 1981
fdd819b2 1982 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
567ec874 1983 i = 0;
22bedad3
JP
1984 netdev_for_each_mc_addr(ha, dev)
1985 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
f565a7c2
AW
1986
1987 sg_set_buf(&sg[1], mac_data,
4cd24eaf 1988 sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
f565a7c2
AW
1989
1990 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
d24bae32 1991 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
99e872ae 1992 dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
f565a7c2
AW
1993
1994 kfree(buf);
2af7698e
AW
1995}
1996
80d5c368
PM
1997static int virtnet_vlan_rx_add_vid(struct net_device *dev,
1998 __be16 proto, u16 vid)
0bde9569
AW
1999{
2000 struct virtnet_info *vi = netdev_priv(dev);
2001 struct scatterlist sg;
2002
d7fad4c8 2003 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
12e57169 2004 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
0bde9569
AW
2005
2006 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
d24bae32 2007 VIRTIO_NET_CTRL_VLAN_ADD, &sg))
0bde9569 2008 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
8e586137 2009 return 0;
0bde9569
AW
2010}
2011
80d5c368
PM
2012static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
2013 __be16 proto, u16 vid)
0bde9569
AW
2014{
2015 struct virtnet_info *vi = netdev_priv(dev);
2016 struct scatterlist sg;
2017
d7fad4c8 2018 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
12e57169 2019 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
0bde9569
AW
2020
2021 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
d24bae32 2022 VIRTIO_NET_CTRL_VLAN_DEL, &sg))
0bde9569 2023 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
8e586137 2024 return 0;
0bde9569
AW
2025}
2026
310974fa 2027static void virtnet_clean_affinity(struct virtnet_info *vi)
986a4f4d
JW
2028{
2029 int i;
2030
8898c21c
WG
2031 if (vi->affinity_hint_set) {
2032 for (i = 0; i < vi->max_queue_pairs; i++) {
19e226e8
CR
2033 virtqueue_set_affinity(vi->rq[i].vq, NULL);
2034 virtqueue_set_affinity(vi->sq[i].vq, NULL);
47be2479
WG
2035 }
2036
8898c21c
WG
2037 vi->affinity_hint_set = false;
2038 }
8898c21c 2039}
47be2479 2040
8898c21c
WG
2041static void virtnet_set_affinity(struct virtnet_info *vi)
2042{
2ca653d6
CR
2043 cpumask_var_t mask;
2044 int stragglers;
2045 int group_size;
2046 int i, j, cpu;
2047 int num_cpu;
2048 int stride;
2049
2050 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
310974fa 2051 virtnet_clean_affinity(vi);
8898c21c 2052 return;
986a4f4d
JW
2053 }
2054
2ca653d6
CR
2055 num_cpu = num_online_cpus();
2056 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
2057 stragglers = num_cpu >= vi->curr_queue_pairs ?
2058 num_cpu % vi->curr_queue_pairs :
2059 0;
2060 cpu = cpumask_next(-1, cpu_online_mask);
4d99f660 2061
2ca653d6
CR
2062 for (i = 0; i < vi->curr_queue_pairs; i++) {
2063 group_size = stride + (i < stragglers ? 1 : 0);
2064
2065 for (j = 0; j < group_size; j++) {
2066 cpumask_set_cpu(cpu, mask);
2067 cpu = cpumask_next_wrap(cpu, cpu_online_mask,
2068 nr_cpu_ids, false);
2069 }
2070 virtqueue_set_affinity(vi->rq[i].vq, mask);
2071 virtqueue_set_affinity(vi->sq[i].vq, mask);
044ab86d 2072 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
2ca653d6 2073 cpumask_clear(mask);
986a4f4d
JW
2074 }
2075
8898c21c 2076 vi->affinity_hint_set = true;
2ca653d6 2077 free_cpumask_var(mask);
986a4f4d
JW
2078}
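The affinity loop above spreads the online CPUs over the queue pairs in contiguous groups: every pair gets `stride` CPUs and the first `stragglers` pairs get one extra. A small standalone sketch of that arithmetic with an example topology:

#include <stdio.h>

int main(void)
{
	int num_cpu = 6, queue_pairs = 4;	/* example topology */
	int stride = num_cpu / queue_pairs;
	if (stride < 1)
		stride = 1;
	int stragglers = num_cpu >= queue_pairs ? num_cpu % queue_pairs : 0;

	int cpu = 0;
	for (int i = 0; i < queue_pairs; i++) {
		int group = stride + (i < stragglers ? 1 : 0);
		printf("queue pair %d ->", i);
		for (int j = 0; j < group; j++)
			printf(" cpu%d", (cpu++) % num_cpu);
		printf("\n");
	}
	return 0;
}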
2079
8017c279 2080static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
8de4b2f3 2081{
8017c279
SAS
2082 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2083 node);
2084 virtnet_set_affinity(vi);
2085 return 0;
2086}
8de4b2f3 2087
8017c279
SAS
2088static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
2089{
2090 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2091 node_dead);
2092 virtnet_set_affinity(vi);
2093 return 0;
2094}
3ab098df 2095
8017c279
SAS
2096static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
2097{
2098 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2099 node);
2100
310974fa 2101 virtnet_clean_affinity(vi);
8017c279
SAS
2102 return 0;
2103}
2104
2105static enum cpuhp_state virtionet_online;
2106
2107static int virtnet_cpu_notif_add(struct virtnet_info *vi)
2108{
2109 int ret;
2110
2111 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
2112 if (ret)
2113 return ret;
2114 ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2115 &vi->node_dead);
2116 if (!ret)
2117 return ret;
2118 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2119 return ret;
2120}
2121
2122static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
2123{
2124 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2125 cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2126 &vi->node_dead);
986a4f4d
JW
2127}
2128
8f9f4668
RJ
2129static void virtnet_get_ringparam(struct net_device *dev,
2130 struct ethtool_ringparam *ring)
2131{
2132 struct virtnet_info *vi = netdev_priv(dev);
2133
986a4f4d
JW
2134 ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2135 ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
8f9f4668
RJ
2136 ring->rx_pending = ring->rx_max_pending;
2137 ring->tx_pending = ring->tx_max_pending;
8f9f4668
RJ
2138}
2139
66846048
RJ
2140
2141static void virtnet_get_drvinfo(struct net_device *dev,
2142 struct ethtool_drvinfo *info)
2143{
2144 struct virtnet_info *vi = netdev_priv(dev);
2145 struct virtio_device *vdev = vi->vdev;
2146
2147 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
2148 strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
2149 strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
2150
2151}
2152
d73bcd2c
JW
2153/* TODO: Eliminate OOO packets during switching */
2154static int virtnet_set_channels(struct net_device *dev,
2155 struct ethtool_channels *channels)
2156{
2157 struct virtnet_info *vi = netdev_priv(dev);
2158 u16 queue_pairs = channels->combined_count;
2159 int err;
2160
2161 /* We don't support separate rx/tx channels.
2162 * We don't allow setting 'other' channels.
2163 */
2164 if (channels->rx_count || channels->tx_count || channels->other_count)
2165 return -EINVAL;
2166
c18e9cd6 2167 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
d73bcd2c
JW
2168 return -EINVAL;
2169
f600b690
JF
 2170 /* For now we don't support modifying channels while XDP is loaded.
 2171 * Also, when XDP is loaded all RX queues have XDP programs, so we
 2172 * only need to check a single RX queue.
 2173 */
2174 if (vi->rq[0].xdp_prog)
2175 return -EINVAL;
2176
47be2479 2177 get_online_cpus();
47315329 2178 err = _virtnet_set_queues(vi, queue_pairs);
de33212f
JD
2179 if (err) {
2180 put_online_cpus();
2181 goto err;
d73bcd2c 2182 }
de33212f 2183 virtnet_set_affinity(vi);
47be2479 2184 put_online_cpus();
d73bcd2c 2185
de33212f
JD
2186 netif_set_real_num_tx_queues(dev, queue_pairs);
2187 netif_set_real_num_rx_queues(dev, queue_pairs);
2188 err:
d73bcd2c
JW
2189 return err;
2190}
2191
d7dfc5cf
TM
2192static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2193{
2194 struct virtnet_info *vi = netdev_priv(dev);
d7dfc5cf 2195 unsigned int i, j;
d7a9a01b 2196 u8 *p = data;
d7dfc5cf
TM
2197
2198 switch (stringset) {
2199 case ETH_SS_STATS:
2200 for (i = 0; i < vi->curr_queue_pairs; i++) {
d7a9a01b
AD
2201 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
2202 ethtool_sprintf(&p, "rx_queue_%u_%s", i,
2203 virtnet_rq_stats_desc[j].desc);
d7dfc5cf
TM
2204 }
2205
2206 for (i = 0; i < vi->curr_queue_pairs; i++) {
d7a9a01b
AD
2207 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
2208 ethtool_sprintf(&p, "tx_queue_%u_%s", i,
2209 virtnet_sq_stats_desc[j].desc);
d7dfc5cf
TM
2210 }
2211 break;
2212 }
2213}
2214
2215static int virtnet_get_sset_count(struct net_device *dev, int sset)
2216{
2217 struct virtnet_info *vi = netdev_priv(dev);
2218
2219 switch (sset) {
2220 case ETH_SS_STATS:
2221 return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
2222 VIRTNET_SQ_STATS_LEN);
2223 default:
2224 return -EOPNOTSUPP;
2225 }
2226}
2227
2228static void virtnet_get_ethtool_stats(struct net_device *dev,
2229 struct ethtool_stats *stats, u64 *data)
2230{
2231 struct virtnet_info *vi = netdev_priv(dev);
2232 unsigned int idx = 0, start, i, j;
2233 const u8 *stats_base;
2234 size_t offset;
2235
2236 for (i = 0; i < vi->curr_queue_pairs; i++) {
2237 struct receive_queue *rq = &vi->rq[i];
2238
d46eeeaf 2239 stats_base = (u8 *)&rq->stats;
d7dfc5cf
TM
2240 do {
2241 start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
2242 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
2243 offset = virtnet_rq_stats_desc[j].offset;
2244 data[idx + j] = *(u64 *)(stats_base + offset);
2245 }
2246 } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
2247 idx += VIRTNET_RQ_STATS_LEN;
2248 }
2249
2250 for (i = 0; i < vi->curr_queue_pairs; i++) {
2251 struct send_queue *sq = &vi->sq[i];
2252
2253 stats_base = (u8 *)&sq->stats;
2254 do {
2255 start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
2256 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
2257 offset = virtnet_sq_stats_desc[j].offset;
2258 data[idx + j] = *(u64 *)(stats_base + offset);
2259 }
2260 } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
2261 idx += VIRTNET_SQ_STATS_LEN;
2262 }
2263}
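The flat u64 array handed back to ethtool is laid out as all rx-queue counter blocks first, then all tx-queue blocks, in the same order as the string table built earlier. A sketch of the index math, with the per-queue counter counts treated as assumed parameters (the real lengths come from the stat descriptor tables defined near the top of the file):

#include <stdio.h>

int main(void)
{
	int queue_pairs = 2;
	int rq_stats = 8, sq_stats = 5;	/* assumed table lengths */

	/* counter 2 of rx queue 1 */
	int rx_idx = 1 * rq_stats + 2;
	/* counter 1 of tx queue 0, after every rx block */
	int tx_idx = queue_pairs * rq_stats + 0 * sq_stats + 1;

	printf("rx_queue_1 counter 2 -> data[%d]\n", rx_idx);
	printf("tx_queue_0 counter 1 -> data[%d]\n", tx_idx);
	printf("total entries: %d\n", queue_pairs * (rq_stats + sq_stats));
	return 0;
}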
2264
d73bcd2c
JW
2265static void virtnet_get_channels(struct net_device *dev,
2266 struct ethtool_channels *channels)
2267{
2268 struct virtnet_info *vi = netdev_priv(dev);
2269
2270 channels->combined_count = vi->curr_queue_pairs;
2271 channels->max_combined = vi->max_queue_pairs;
2272 channels->max_other = 0;
2273 channels->rx_count = 0;
2274 channels->tx_count = 0;
2275 channels->other_count = 0;
2276}
2277
ebb6b4b1
PR
2278static int virtnet_set_link_ksettings(struct net_device *dev,
2279 const struct ethtool_link_ksettings *cmd)
16032be5
NA
2280{
2281 struct virtnet_info *vi = netdev_priv(dev);
16032be5 2282
9aedc6e2
CF
2283 return ethtool_virtdev_set_link_ksettings(dev, cmd,
2284 &vi->speed, &vi->duplex);
16032be5
NA
2285}
2286
ebb6b4b1
PR
2287static int virtnet_get_link_ksettings(struct net_device *dev,
2288 struct ethtool_link_ksettings *cmd)
16032be5
NA
2289{
2290 struct virtnet_info *vi = netdev_priv(dev);
2291
ebb6b4b1
PR
2292 cmd->base.speed = vi->speed;
2293 cmd->base.duplex = vi->duplex;
2294 cmd->base.port = PORT_OTHER;
16032be5
NA
2295
2296 return 0;
2297}
2298
0c465be1
JW
2299static int virtnet_set_coalesce(struct net_device *dev,
2300 struct ethtool_coalesce *ec)
2301{
0c465be1
JW
2302 struct virtnet_info *vi = netdev_priv(dev);
2303 int i, napi_weight;
2304
a51e5206
JK
2305 if (ec->tx_max_coalesced_frames > 1 ||
2306 ec->rx_max_coalesced_frames != 1)
0c465be1
JW
2307 return -EINVAL;
2308
0c465be1 2309 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
0c465be1
JW
2310 if (napi_weight ^ vi->sq[0].napi.weight) {
2311 if (dev->flags & IFF_UP)
2312 return -EBUSY;
2313 for (i = 0; i < vi->max_queue_pairs; i++)
2314 vi->sq[i].napi.weight = napi_weight;
2315 }
2316
2317 return 0;
2318}
2319
2320static int virtnet_get_coalesce(struct net_device *dev,
2321 struct ethtool_coalesce *ec)
2322{
2323 struct ethtool_coalesce ec_default = {
2324 .cmd = ETHTOOL_GCOALESCE,
2325 .rx_max_coalesced_frames = 1,
2326 };
2327 struct virtnet_info *vi = netdev_priv(dev);
2328
2329 memcpy(ec, &ec_default, sizeof(ec_default));
2330
2331 if (vi->sq[0].napi.weight)
2332 ec->tx_max_coalesced_frames = 1;
2333
2334 return 0;
2335}
2336
16032be5
NA
2337static void virtnet_init_settings(struct net_device *dev)
2338{
2339 struct virtnet_info *vi = netdev_priv(dev);
2340
2341 vi->speed = SPEED_UNKNOWN;
2342 vi->duplex = DUPLEX_UNKNOWN;
2343}
2344
faa9b39f
JB
2345static void virtnet_update_settings(struct virtnet_info *vi)
2346{
2347 u32 speed;
2348 u8 duplex;
2349
2350 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
2351 return;
2352
64ffa39d
MT
2353 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
2354
faa9b39f
JB
2355 if (ethtool_validate_speed(speed))
2356 vi->speed = speed;
64ffa39d
MT
2357
2358 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
2359
faa9b39f
JB
2360 if (ethtool_validate_duplex(duplex))
2361 vi->duplex = duplex;
2362}
2363
0fc0b732 2364static const struct ethtool_ops virtnet_ethtool_ops = {
a51e5206 2365 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
66846048 2366 .get_drvinfo = virtnet_get_drvinfo,
9f4d26d0 2367 .get_link = ethtool_op_get_link,
8f9f4668 2368 .get_ringparam = virtnet_get_ringparam,
d7dfc5cf
TM
2369 .get_strings = virtnet_get_strings,
2370 .get_sset_count = virtnet_get_sset_count,
2371 .get_ethtool_stats = virtnet_get_ethtool_stats,
d73bcd2c
JW
2372 .set_channels = virtnet_set_channels,
2373 .get_channels = virtnet_get_channels,
074c3582 2374 .get_ts_info = ethtool_op_get_ts_info,
ebb6b4b1
PR
2375 .get_link_ksettings = virtnet_get_link_ksettings,
2376 .set_link_ksettings = virtnet_set_link_ksettings,
0c465be1
JW
2377 .set_coalesce = virtnet_set_coalesce,
2378 .get_coalesce = virtnet_get_coalesce,
a9ea3fc6
HX
2379};
2380
9fe7bfce
JF
2381static void virtnet_freeze_down(struct virtio_device *vdev)
2382{
2383 struct virtnet_info *vi = vdev->priv;
2384 int i;
2385
2386 /* Make sure no work handler is accessing the device */
2387 flush_work(&vi->config_work);
2388
05c998b7 2389 netif_tx_lock_bh(vi->dev);
9fe7bfce 2390 netif_device_detach(vi->dev);
05c998b7 2391 netif_tx_unlock_bh(vi->dev);
9fe7bfce
JF
2392 cancel_delayed_work_sync(&vi->refill);
2393
2394 if (netif_running(vi->dev)) {
b92f1e67 2395 for (i = 0; i < vi->max_queue_pairs; i++) {
9fe7bfce 2396 napi_disable(&vi->rq[i].napi);
78a57b48 2397 virtnet_napi_tx_disable(&vi->sq[i].napi);
b92f1e67 2398 }
9fe7bfce
JF
2399 }
2400}
2401
2402static int init_vqs(struct virtnet_info *vi);
2403
2404static int virtnet_restore_up(struct virtio_device *vdev)
2405{
2406 struct virtnet_info *vi = vdev->priv;
2407 int err, i;
2408
2409 err = init_vqs(vi);
2410 if (err)
2411 return err;
2412
2413 virtio_device_ready(vdev);
2414
2415 if (netif_running(vi->dev)) {
2416 for (i = 0; i < vi->curr_queue_pairs; i++)
2417 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2418 schedule_delayed_work(&vi->refill, 0);
2419
b92f1e67 2420 for (i = 0; i < vi->max_queue_pairs; i++) {
e4e8452a 2421 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
b92f1e67
WB
2422 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2423 &vi->sq[i].napi);
2424 }
9fe7bfce
JF
2425 }
2426
05c998b7 2427 netif_tx_lock_bh(vi->dev);
9fe7bfce 2428 netif_device_attach(vi->dev);
05c998b7 2429 netif_tx_unlock_bh(vi->dev);
9fe7bfce
JF
2430 return err;
2431}
2432
3f93522f
JW
2433static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
2434{
2435 struct scatterlist sg;
12e57169 2436 vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
3f93522f 2437
12e57169 2438 sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
3f93522f
JW
2439
2440 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
2441 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
7934b481 2442 dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
3f93522f
JW
2443 return -EINVAL;
2444 }
2445
2446 return 0;
2447}
2448
2449static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
2450{
2451 u64 offloads = 0;
2452
2453 if (!vi->guest_offloads)
2454 return 0;
2455
3f93522f
JW
2456 return virtnet_set_guest_offloads(vi, offloads);
2457}
2458
2459static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
2460{
2461 u64 offloads = vi->guest_offloads;
2462
2463 if (!vi->guest_offloads)
2464 return 0;
3f93522f
JW
2465
2466 return virtnet_set_guest_offloads(vi, offloads);
2467}
2468
9861ce03
JK
2469static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2470 struct netlink_ext_ack *extack)
f600b690
JF
2471{
2472 unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
2473 struct virtnet_info *vi = netdev_priv(dev);
2474 struct bpf_prog *old_prog;
017b29c3 2475 u16 xdp_qp = 0, curr_qp;
672aafd5 2476 int i, err;
f600b690 2477
3f93522f
JW
2478 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
2479 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
2480 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
2481 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
18ba58e1
JW
2482 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
2483 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
2484 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first");
f600b690
JF
2485 return -EOPNOTSUPP;
2486 }
2487
2488 if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
4d463c4d 2489 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
f600b690
JF
2490 return -EINVAL;
2491 }
2492
2493 if (dev->mtu > max_sz) {
4d463c4d 2494 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
f600b690
JF
2495 netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
2496 return -EINVAL;
2497 }
2498
672aafd5
JF
2499 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
2500 if (prog)
2501 xdp_qp = nr_cpu_ids;
2502
2503 /* XDP requires extra queues for XDP_TX */
2504 if (curr_qp + xdp_qp > vi->max_queue_pairs) {
97c2c69e 2505 netdev_warn(dev, "XDP requests %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
672aafd5 2506 curr_qp + xdp_qp, vi->max_queue_pairs);
97c2c69e 2507 xdp_qp = 0;
672aafd5
JF
2508 }
2509
03aa6d34
TM
2510 old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
2511 if (!prog && !old_prog)
2512 return 0;
2513
85192dbf
AN
2514 if (prog)
2515 bpf_prog_add(prog, vi->max_queue_pairs - 1);
2de2f7f4 2516
4941d472 2517 /* Make sure NAPI is not using any XDP TX queues for RX. */
534da5e8
TM
2518 if (netif_running(dev)) {
2519 for (i = 0; i < vi->max_queue_pairs; i++) {
4e09ff53 2520 napi_disable(&vi->rq[i].napi);
534da5e8
TM
2521 virtnet_napi_tx_disable(&vi->sq[i].napi);
2522 }
2523 }
f600b690 2524
03aa6d34
TM
2525 if (!prog) {
2526 for (i = 0; i < vi->max_queue_pairs; i++) {
2527 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
2528 if (i == 0)
2529 virtnet_restore_guest_offloads(vi);
2530 }
2531 synchronize_net();
2532 }
f600b690 2533
4941d472
JW
2534 err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
2535 if (err)
2536 goto err;
188313c1 2537 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
4941d472 2538 vi->xdp_queue_pairs = xdp_qp;
672aafd5 2539
03aa6d34 2540 if (prog) {
97c2c69e 2541 vi->xdp_enabled = true;
03aa6d34
TM
2542 for (i = 0; i < vi->max_queue_pairs; i++) {
2543 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
2544 if (i == 0 && !old_prog)
3f93522f 2545 virtnet_clear_guest_offloads(vi);
3f93522f 2546 }
97c2c69e
XZ
2547 } else {
2548 vi->xdp_enabled = false;
03aa6d34
TM
2549 }
2550
2551 for (i = 0; i < vi->max_queue_pairs; i++) {
f600b690
JF
2552 if (old_prog)
2553 bpf_prog_put(old_prog);
534da5e8 2554 if (netif_running(dev)) {
4e09ff53 2555 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
534da5e8
TM
2556 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2557 &vi->sq[i].napi);
2558 }
f600b690
JF
2559 }
2560
2561 return 0;
2de2f7f4 2562
4941d472 2563err:
03aa6d34
TM
2564 if (!prog) {
2565 virtnet_clear_guest_offloads(vi);
2566 for (i = 0; i < vi->max_queue_pairs; i++)
2567 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
2568 }
2569
8be4d9a4 2570 if (netif_running(dev)) {
534da5e8 2571 for (i = 0; i < vi->max_queue_pairs; i++) {
8be4d9a4 2572 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
534da5e8
TM
2573 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2574 &vi->sq[i].napi);
2575 }
8be4d9a4 2576 }
2de2f7f4
JF
2577 if (prog)
2578 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
2579 return err;
f600b690
JF
2580}
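Attaching a program tries to reserve one extra tx queue per possible CPU so each core can run XDP_TX without locking; if curr_qp plus nr_cpu_ids exceeds max_queue_pairs, xdp_qp falls back to 0 and transmits share locked queues. A rough sketch of that accounting with example values:

#include <stdio.h>

int main(void)
{
	int max_queue_pairs = 8, curr_qp = 4, nr_cpu_ids = 8;	/* example values */
	int xdp_qp = nr_cpu_ids;

	if (curr_qp + xdp_qp > max_queue_pairs) {
		printf("want %d queues, max %d: falling back to locked tx mode\n",
		       curr_qp + xdp_qp, max_queue_pairs);
		xdp_qp = 0;
	} else {
		printf("reserving %d dedicated XDP_TX queues\n", xdp_qp);
	}
	printf("queue pairs in use: %d\n", curr_qp + xdp_qp);
	return 0;
}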
2581
f4e63525 2582static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
f600b690
JF
2583{
2584 switch (xdp->command) {
2585 case XDP_SETUP_PROG:
9861ce03 2586 return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
f600b690
JF
2587 default:
2588 return -EINVAL;
2589 }
2590}
2591
ba5e4426
SS
2592static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
2593 size_t len)
2594{
2595 struct virtnet_info *vi = netdev_priv(dev);
2596 int ret;
2597
2598 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
2599 return -EOPNOTSUPP;
2600
2601 ret = snprintf(buf, len, "sby");
2602 if (ret >= len)
2603 return -EOPNOTSUPP;
2604
2605 return 0;
2606}
2607
a02e8964
WB
2608static int virtnet_set_features(struct net_device *dev,
2609 netdev_features_t features)
2610{
2611 struct virtnet_info *vi = netdev_priv(dev);
cf8691cb 2612 u64 offloads;
a02e8964
WB
2613 int err;
2614
3618ad2a 2615 if ((dev->features ^ features) & NETIF_F_LRO) {
97c2c69e 2616 if (vi->xdp_enabled)
cf8691cb
MT
2617 return -EBUSY;
2618
a02e8964 2619 if (features & NETIF_F_LRO)
cf8691cb 2620 offloads = vi->guest_offloads_capable;
a02e8964 2621 else
cf8691cb
MT
2622 offloads = vi->guest_offloads_capable &
2623 ~GUEST_OFFLOAD_LRO_MASK;
a02e8964 2624
cf8691cb
MT
2625 err = virtnet_set_guest_offloads(vi, offloads);
2626 if (err)
2627 return err;
2628 vi->guest_offloads = offloads;
a02e8964
WB
2629 }
2630
2631 return 0;
2632}
2633
76288b4e
SH
2634static const struct net_device_ops virtnet_netdev = {
2635 .ndo_open = virtnet_open,
2636 .ndo_stop = virtnet_close,
2637 .ndo_start_xmit = start_xmit,
2638 .ndo_validate_addr = eth_validate_addr,
9c46f6d4 2639 .ndo_set_mac_address = virtnet_set_mac_address,
2af7698e 2640 .ndo_set_rx_mode = virtnet_set_rx_mode,
3fa2a1df 2641 .ndo_get_stats64 = virtnet_stats,
1824a989
AW
2642 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
2643 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
f4e63525 2644 .ndo_bpf = virtnet_xdp,
186b3c99 2645 .ndo_xdp_xmit = virtnet_xdp_xmit,
2836b4f2 2646 .ndo_features_check = passthru_features_check,
ba5e4426 2647 .ndo_get_phys_port_name = virtnet_get_phys_port_name,
a02e8964 2648 .ndo_set_features = virtnet_set_features,
76288b4e
SH
2649};
2650
586d17c5 2651static void virtnet_config_changed_work(struct work_struct *work)
9f4d26d0 2652{
586d17c5
JW
2653 struct virtnet_info *vi =
2654 container_of(work, struct virtnet_info, config_work);
9f4d26d0
MM
2655 u16 v;
2656
855e0c52
RR
2657 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
2658 struct virtio_net_config, status, &v) < 0)
507613bf 2659 return;
586d17c5
JW
2660
2661 if (v & VIRTIO_NET_S_ANNOUNCE) {
ee89bab1 2662 netdev_notify_peers(vi->dev);
586d17c5
JW
2663 virtnet_ack_link_announce(vi);
2664 }
9f4d26d0
MM
2665
2666 /* Ignore unknown (future) status bits */
2667 v &= VIRTIO_NET_S_LINK_UP;
2668
2669 if (vi->status == v)
507613bf 2670 return;
9f4d26d0
MM
2671
2672 vi->status = v;
2673
2674 if (vi->status & VIRTIO_NET_S_LINK_UP) {
faa9b39f 2675 virtnet_update_settings(vi);
9f4d26d0 2676 netif_carrier_on(vi->dev);
986a4f4d 2677 netif_tx_wake_all_queues(vi->dev);
9f4d26d0
MM
2678 } else {
2679 netif_carrier_off(vi->dev);
986a4f4d 2680 netif_tx_stop_all_queues(vi->dev);
9f4d26d0
MM
2681 }
2682}
2683
2684static void virtnet_config_changed(struct virtio_device *vdev)
2685{
2686 struct virtnet_info *vi = vdev->priv;
2687
3b07e9ca 2688 schedule_work(&vi->config_work);
9f4d26d0
MM
2689}
2690
986a4f4d
JW
2691static void virtnet_free_queues(struct virtnet_info *vi)
2692{
d4fb84ee
AV
2693 int i;
2694
ab3971b1 2695 for (i = 0; i < vi->max_queue_pairs; i++) {
5198d545
JK
2696 __netif_napi_del(&vi->rq[i].napi);
2697 __netif_napi_del(&vi->sq[i].napi);
ab3971b1 2698 }
d4fb84ee 2699
5198d545 2700 /* We called __netif_napi_del(),
963abe5c
ED
2701 * we need to respect an RCU grace period before freeing vi->rq
2702 */
2703 synchronize_net();
2704
986a4f4d
JW
2705 kfree(vi->rq);
2706 kfree(vi->sq);
12e57169 2707 kfree(vi->ctrl);
986a4f4d
JW
2708}
2709
47315329 2710static void _free_receive_bufs(struct virtnet_info *vi)
986a4f4d 2711{
f600b690 2712 struct bpf_prog *old_prog;
986a4f4d
JW
2713 int i;
2714
2715 for (i = 0; i < vi->max_queue_pairs; i++) {
2716 while (vi->rq[i].pages)
2717 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
f600b690
JF
2718
2719 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
2720 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
2721 if (old_prog)
2722 bpf_prog_put(old_prog);
986a4f4d 2723 }
47315329
JF
2724}
2725
2726static void free_receive_bufs(struct virtnet_info *vi)
2727{
2728 rtnl_lock();
2729 _free_receive_bufs(vi);
f600b690 2730 rtnl_unlock();
986a4f4d
JW
2731}
2732
fb51879d
MD
2733static void free_receive_page_frags(struct virtnet_info *vi)
2734{
2735 int i;
2736 for (i = 0; i < vi->max_queue_pairs; i++)
2737 if (vi->rq[i].alloc_frag.page)
2738 put_page(vi->rq[i].alloc_frag.page);
2739}
2740
986a4f4d
JW
2741static void free_unused_bufs(struct virtnet_info *vi)
2742{
2743 void *buf;
2744 int i;
2745
2746 for (i = 0; i < vi->max_queue_pairs; i++) {
2747 struct virtqueue *vq = vi->sq[i].vq;
56434a01 2748 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
5050471d 2749 if (!is_xdp_frame(buf))
56434a01
JF
2750 dev_kfree_skb(buf);
2751 else
5050471d 2752 xdp_return_frame(ptr_to_xdp(buf));
56434a01 2753 }
986a4f4d
JW
2754 }
2755
2756 for (i = 0; i < vi->max_queue_pairs; i++) {
2757 struct virtqueue *vq = vi->rq[i].vq;
2758
2759 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
ab7db917 2760 if (vi->mergeable_rx_bufs) {
680557cf 2761 put_page(virt_to_head_page(buf));
ab7db917 2762 } else if (vi->big_packets) {
fa9fac17 2763 give_pages(&vi->rq[i], buf);
ab7db917 2764 } else {
f6b10209 2765 put_page(virt_to_head_page(buf));
ab7db917 2766 }
986a4f4d 2767 }
986a4f4d
JW
2768 }
2769}
2770
e9d7417b
JW
2771static void virtnet_del_vqs(struct virtnet_info *vi)
2772{
2773 struct virtio_device *vdev = vi->vdev;
2774
310974fa 2775 virtnet_clean_affinity(vi);
986a4f4d 2776
e9d7417b 2777 vdev->config->del_vqs(vdev);
986a4f4d
JW
2778
2779 virtnet_free_queues(vi);
e9d7417b
JW
2780}
2781
d85b758f
MT
2782/* How large should a single buffer be so a queue full of these can fit at
2783 * least one full packet?
2784 * Logic below assumes the mergeable buffer header is used.
2785 */
2786static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
2787{
2788 const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2789 unsigned int rq_size = virtqueue_get_vring_size(vq);
2790 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
2791 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
2792 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
2793
f0c3192c
MT
2794 return max(max(min_buf_len, hdr_len) - hdr_len,
2795 (unsigned int)GOOD_PACKET_LEN);
d85b758f
MT
2796}
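Worked through with plausible numbers (12-byte mergeable header, 1518-byte GOOD_PACKET_LEN floor, IP_MAX_MTU of 65535 — all assumptions restated in the sketch), the formula is usually clamped to GOOD_PACKET_LEN unless the ring is small:

#include <stdio.h>

static unsigned int min_buf_len(unsigned int packet_len, unsigned int rq_size)
{
	const unsigned int hdr_len = 12, good_packet_len = 1518;	/* assumptions */
	unsigned int buf_len = hdr_len + 14 /* ETH_HLEN */ + 4 /* VLAN_HLEN */ + packet_len;
	unsigned int min = (buf_len + rq_size - 1) / rq_size;		/* DIV_ROUND_UP */
	unsigned int v = (min > hdr_len ? min : hdr_len) - hdr_len;

	return v > good_packet_len ? v : good_packet_len;
}

int main(void)
{
	printf("mtu 1500, ring 256      -> %u\n", min_buf_len(1500, 256));
	printf("big packets, ring 256   -> %u\n", min_buf_len(65535, 256));
	printf("big packets, ring 16    -> %u\n", min_buf_len(65535, 16));
	return 0;
}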
2797
986a4f4d 2798static int virtnet_find_vqs(struct virtnet_info *vi)
3f9c10b0 2799{
986a4f4d
JW
2800 vq_callback_t **callbacks;
2801 struct virtqueue **vqs;
2802 int ret = -ENOMEM;
2803 int i, total_vqs;
2804 const char **names;
d45b897b 2805 bool *ctx;
986a4f4d
JW
2806
2807 /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
2808 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
2809 * possible control vq.
2810 */
2811 total_vqs = vi->max_queue_pairs * 2 +
2812 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
2813
2814 /* Allocate space for find_vqs parameters */
6396bb22 2815 vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
986a4f4d
JW
2816 if (!vqs)
2817 goto err_vq;
6da2ec56 2818 callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
986a4f4d
JW
2819 if (!callbacks)
2820 goto err_callback;
6da2ec56 2821 names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
986a4f4d
JW
2822 if (!names)
2823 goto err_names;
192f68cf 2824 if (!vi->big_packets || vi->mergeable_rx_bufs) {
6396bb22 2825 ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
d45b897b
MT
2826 if (!ctx)
2827 goto err_ctx;
2828 } else {
2829 ctx = NULL;
2830 }
986a4f4d
JW
2831
2832 /* Parameters for control virtqueue, if any */
2833 if (vi->has_cvq) {
2834 callbacks[total_vqs - 1] = NULL;
2835 names[total_vqs - 1] = "control";
2836 }
3f9c10b0 2837
986a4f4d
JW
2838 /* Allocate/initialize parameters for send/receive virtqueues */
2839 for (i = 0; i < vi->max_queue_pairs; i++) {
2840 callbacks[rxq2vq(i)] = skb_recv_done;
2841 callbacks[txq2vq(i)] = skb_xmit_done;
2842 sprintf(vi->rq[i].name, "input.%d", i);
2843 sprintf(vi->sq[i].name, "output.%d", i);
2844 names[rxq2vq(i)] = vi->rq[i].name;
2845 names[txq2vq(i)] = vi->sq[i].name;
d45b897b
MT
2846 if (ctx)
2847 ctx[rxq2vq(i)] = true;
986a4f4d 2848 }
3f9c10b0 2849
a2f7dc00
XT
2850 ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
2851 names, ctx, NULL);
986a4f4d
JW
2852 if (ret)
2853 goto err_find;
3f9c10b0 2854
986a4f4d
JW
2855 if (vi->has_cvq) {
2856 vi->cvq = vqs[total_vqs - 1];
3f9c10b0 2857 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
f646968f 2858 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3f9c10b0 2859 }
986a4f4d
JW
2860
2861 for (i = 0; i < vi->max_queue_pairs; i++) {
2862 vi->rq[i].vq = vqs[rxq2vq(i)];
d85b758f 2863 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
986a4f4d
JW
2864 vi->sq[i].vq = vqs[txq2vq(i)];
2865 }
2866
2fa3c8a8 2867 /* run here: ret == 0. */
986a4f4d 2868
986a4f4d
JW
2869
2870err_find:
d45b897b
MT
2871 kfree(ctx);
2872err_ctx:
986a4f4d
JW
2873 kfree(names);
2874err_names:
2875 kfree(callbacks);
2876err_callback:
2877 kfree(vqs);
2878err_vq:
2879 return ret;
2880}
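The virtqueue layout requested here is strictly interleaved: vq 0/1 are rx/tx of pair 0, vq 2/3 of pair 1, and so on, with the control vq (if present) last. Assuming the usual rxq2vq(i) = 2*i and txq2vq(i) = 2*i + 1 helpers defined earlier in the file, the mapping looks like:

#include <stdio.h>

static int rxq2vq(int q) { return q * 2; }	/* assumed to match the driver */
static int txq2vq(int q) { return q * 2 + 1; }

int main(void)
{
	int max_queue_pairs = 3, has_cvq = 1;
	int total_vqs = max_queue_pairs * 2 + has_cvq;

	for (int i = 0; i < max_queue_pairs; i++)
		printf("pair %d: input.%d -> vq %d, output.%d -> vq %d\n",
		       i, i, rxq2vq(i), i, txq2vq(i));
	if (has_cvq)
		printf("control -> vq %d\n", total_vqs - 1);
	return 0;
}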
2881
2882static int virtnet_alloc_queues(struct virtnet_info *vi)
2883{
2884 int i;
2885
122b84a1
MG
2886 if (vi->has_cvq) {
2887 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
2888 if (!vi->ctrl)
2889 goto err_ctrl;
2890 } else {
2891 vi->ctrl = NULL;
2892 }
6396bb22 2893 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
986a4f4d
JW
2894 if (!vi->sq)
2895 goto err_sq;
6396bb22 2896 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
008d4278 2897 if (!vi->rq)
986a4f4d
JW
2898 goto err_rq;
2899
2900 INIT_DELAYED_WORK(&vi->refill, refill_work);
2901 for (i = 0; i < vi->max_queue_pairs; i++) {
2902 vi->rq[i].pages = NULL;
2903 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
2904 napi_weight);
1d11e732
WB
2905 netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
2906 napi_tx ? napi_weight : 0);
986a4f4d
JW
2907
2908 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
5377d758 2909 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
986a4f4d 2910 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
d7dfc5cf
TM
2911
2912 u64_stats_init(&vi->rq[i].stats.syncp);
2913 u64_stats_init(&vi->sq[i].stats.syncp);
986a4f4d
JW
2914 }
2915
2916 return 0;
2917
2918err_rq:
2919 kfree(vi->sq);
2920err_sq:
12e57169
MT
2921 kfree(vi->ctrl);
2922err_ctrl:
986a4f4d
JW
2923 return -ENOMEM;
2924}
2925
2926static int init_vqs(struct virtnet_info *vi)
2927{
2928 int ret;
2929
2930 /* Allocate send & receive queues */
2931 ret = virtnet_alloc_queues(vi);
2932 if (ret)
2933 goto err;
2934
2935 ret = virtnet_find_vqs(vi);
2936 if (ret)
2937 goto err_free;
2938
47be2479 2939 get_online_cpus();
8898c21c 2940 virtnet_set_affinity(vi);
47be2479
WG
2941 put_online_cpus();
2942
986a4f4d
JW
2943 return 0;
2944
2945err_free:
2946 virtnet_free_queues(vi);
2947err:
2948 return ret;
3f9c10b0
AS
2949}
2950
fbf28d78
MD
2951#ifdef CONFIG_SYSFS
2952static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
718ad681 2953 char *buf)
fbf28d78
MD
2954{
2955 struct virtnet_info *vi = netdev_priv(queue->dev);
2956 unsigned int queue_index = get_netdev_rx_queue_index(queue);
3cc81a9a
JW
2957 unsigned int headroom = virtnet_get_headroom(vi);
2958 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
5377d758 2959 struct ewma_pkt_len *avg;
fbf28d78
MD
2960
2961 BUG_ON(queue_index >= vi->max_queue_pairs);
2962 avg = &vi->rq[queue_index].mrg_avg_pkt_len;
d85b758f 2963 return sprintf(buf, "%u\n",
3cc81a9a
JW
2964 get_mergeable_buf_len(&vi->rq[queue_index], avg,
2965 SKB_DATA_ALIGN(headroom + tailroom)));
fbf28d78
MD
2966}
2967
2968static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
2969 __ATTR_RO(mergeable_rx_buffer_size);
2970
2971static struct attribute *virtio_net_mrg_rx_attrs[] = {
2972 &mergeable_rx_buffer_size_attribute.attr,
2973 NULL
2974};
2975
2976static const struct attribute_group virtio_net_mrg_rx_group = {
2977 .name = "virtio_net",
2978 .attrs = virtio_net_mrg_rx_attrs
2979};
2980#endif
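When mergeable buffers are in use, the EWMA-based size estimate is exported per rx queue through sysfs. A small userspace reader; the interface name and queue index in the path are assumptions to be adjusted for the system at hand:

#include <stdio.h>

int main(void)
{
	/* hypothetical device/queue; adjust to the NIC and rx queue of interest */
	const char *path =
		"/sys/class/net/eth0/queues/rx-0/virtio_net/mergeable_rx_buffer_size";
	unsigned int size;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%u", &size) != 1) {
		fprintf(stderr, "unexpected contents in %s\n", path);
		fclose(f);
		return 1;
	}
	printf("estimated mergeable rx buffer size: %u bytes\n", size);
	fclose(f);
	return 0;
}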
2981
892d6eb1
JW
2982static bool virtnet_fail_on_feature(struct virtio_device *vdev,
2983 unsigned int fbit,
2984 const char *fname, const char *dname)
2985{
2986 if (!virtio_has_feature(vdev, fbit))
2987 return false;
2988
2989 dev_err(&vdev->dev, "device advertises feature %s but not %s",
2990 fname, dname);
2991
2992 return true;
2993}
2994
2995#define VIRTNET_FAIL_ON(vdev, fbit, dbit) \
2996 virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
2997
2998static bool virtnet_validate_features(struct virtio_device *vdev)
2999{
3000 if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
3001 (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
3002 "VIRTIO_NET_F_CTRL_VQ") ||
3003 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
3004 "VIRTIO_NET_F_CTRL_VQ") ||
3005 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
3006 "VIRTIO_NET_F_CTRL_VQ") ||
3007 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
3008 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
3009 "VIRTIO_NET_F_CTRL_VQ"))) {
3010 return false;
3011 }
3012
3013 return true;
3014}
3015
d0c2c997
JW
3016#define MIN_MTU ETH_MIN_MTU
3017#define MAX_MTU ETH_MAX_MTU
3018
fe36cbe0 3019static int virtnet_validate(struct virtio_device *vdev)
296f96fc 3020{
6ba42248
MT
3021 if (!vdev->config->get) {
3022 dev_err(&vdev->dev, "%s failure: config access disabled\n",
3023 __func__);
3024 return -EINVAL;
3025 }
3026
892d6eb1
JW
3027 if (!virtnet_validate_features(vdev))
3028 return -EINVAL;
3029
fe36cbe0
MT
3030 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
3031 int mtu = virtio_cread16(vdev,
3032 offsetof(struct virtio_net_config,
3033 mtu));
3034 if (mtu < MIN_MTU)
3035 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
3036 }
3037
3038 return 0;
3039}
3040
3041static int virtnet_probe(struct virtio_device *vdev)
3042{
d7dfc5cf 3043 int i, err = -ENOMEM;
fe36cbe0
MT
3044 struct net_device *dev;
3045 struct virtnet_info *vi;
3046 u16 max_queue_pairs;
3047 int mtu;
3048
986a4f4d 3049 /* Find if host supports multiqueue virtio_net device */
855e0c52
RR
3050 err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
3051 struct virtio_net_config,
3052 max_virtqueue_pairs, &max_queue_pairs);
986a4f4d
JW
3053
 3054 /* We need at least 2 queues */
3055 if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
3056 max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
3057 !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
3058 max_queue_pairs = 1;
296f96fc
RR
3059
3060 /* Allocate ourselves a network device with room for our info */
986a4f4d 3061 dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
296f96fc
RR
3062 if (!dev)
3063 return -ENOMEM;
3064
3065 /* Set up network device as normal. */
ab5bd583
XZ
3066 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
3067 IFF_TX_SKB_NO_LINEAR;
76288b4e 3068 dev->netdev_ops = &virtnet_netdev;
296f96fc 3069 dev->features = NETIF_F_HIGHDMA;
3fa2a1df 3070
7ad24ea4 3071 dev->ethtool_ops = &virtnet_ethtool_ops;
296f96fc
RR
3072 SET_NETDEV_DEV(dev, &vdev->dev);
3073
3074 /* Do we support "hardware" checksums? */
98e778c9 3075 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
296f96fc 3076 /* This opens up the world of extra features. */
48900cb6 3077 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
98e778c9 3078 if (csum)
48900cb6 3079 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
98e778c9
MM
3080
3081 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
e078de03 3082 dev->hw_features |= NETIF_F_TSO
34a48579
RR
3083 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
3084 }
5539ae96 3085 /* Individual feature bits: what can host handle? */
98e778c9
MM
3086 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
3087 dev->hw_features |= NETIF_F_TSO;
3088 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
3089 dev->hw_features |= NETIF_F_TSO6;
3090 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
3091 dev->hw_features |= NETIF_F_TSO_ECN;
98e778c9 3092
41f2f127
JW
3093 dev->features |= NETIF_F_GSO_ROBUST;
3094
98e778c9 3095 if (gso)
e078de03 3096 dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
98e778c9 3097 /* (!csum && gso) case will be fixed by register_netdev() */
296f96fc 3098 }
4f49129b
TH
3099 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
3100 dev->features |= NETIF_F_RXCSUM;
a02e8964
WB
3101 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3102 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
3103 dev->features |= NETIF_F_LRO;
cf8691cb 3104 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
a02e8964 3105 dev->hw_features |= NETIF_F_LRO;
296f96fc 3106
4fda8302
JW
3107 dev->vlan_features = dev->features;
3108
d0c2c997
JW
3109 /* MTU range: 68 - 65535 */
3110 dev->min_mtu = MIN_MTU;
3111 dev->max_mtu = MAX_MTU;
3112
296f96fc 3113 /* Configuration may specify what MAC to use. Otherwise random. */
855e0c52
RR
3114 if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
3115 virtio_cread_bytes(vdev,
3116 offsetof(struct virtio_net_config, mac),
3117 dev->dev_addr, dev->addr_len);
3118 else
f2cedb63 3119 eth_hw_addr_random(dev);
296f96fc
RR
3120
3121 /* Set up our device-specific information */
3122 vi = netdev_priv(dev);
296f96fc
RR
3123 vi->dev = dev;
3124 vi->vdev = vdev;
d9d5dcc8 3125 vdev->priv = vi;
827da44c 3126
586d17c5 3127 INIT_WORK(&vi->config_work, virtnet_config_changed_work);
296f96fc 3128
97402b96 3129 /* If we can receive ANY GSO packets, we must allocate large ones. */
8e95a202
JP
3130 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3131 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
e3e3c423
VY
3132 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
3133 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
97402b96
HX
3134 vi->big_packets = true;
3135
3f2c31d9
MM
3136 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
3137 vi->mergeable_rx_bufs = true;
3138
d04302b3
MT
3139 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
3140 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
012873d0
MT
3141 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
3142 else
3143 vi->hdr_len = sizeof(struct virtio_net_hdr);
3144
75993300
MT
3145 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
3146 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
e7428e95
MT
3147 vi->any_header_sg = true;
3148
986a4f4d
JW
3149 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
3150 vi->has_cvq = true;
3151
14de9d11
AC
3152 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
3153 mtu = virtio_cread16(vdev,
3154 offsetof(struct virtio_net_config,
3155 mtu));
93a205ee 3156 if (mtu < dev->min_mtu) {
fe36cbe0
MT
3157 /* Should never trigger: MTU was previously validated
3158 * in virtnet_validate.
3159 */
7934b481
YS
3160 dev_err(&vdev->dev,
3161 "device MTU appears to have changed it is now %d < %d",
3162 mtu, dev->min_mtu);
411ea23a 3163 err = -EINVAL;
d7dfc5cf 3164 goto free;
93a205ee 3165 }
2e123b44 3166
fe36cbe0
MT
3167 dev->mtu = mtu;
3168 dev->max_mtu = mtu;
3169
2e123b44
MT
3170 /* TODO: size buffers correctly in this case. */
3171 if (dev->mtu > ETH_DATA_LEN)
3172 vi->big_packets = true;
14de9d11
AC
3173 }
3174
012873d0
MT
3175 if (vi->any_header_sg)
3176 dev->needed_headroom = vi->hdr_len;
6ebbc1a6 3177
44900010
JW
3178 /* Enable multiqueue by default */
3179 if (num_online_cpus() >= max_queue_pairs)
3180 vi->curr_queue_pairs = max_queue_pairs;
3181 else
3182 vi->curr_queue_pairs = num_online_cpus();
986a4f4d
JW
3183 vi->max_queue_pairs = max_queue_pairs;
3184
3185 /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
3f9c10b0 3186 err = init_vqs(vi);
d2a7ddda 3187 if (err)
d7dfc5cf 3188 goto free;
296f96fc 3189
fbf28d78
MD
3190#ifdef CONFIG_SYSFS
3191 if (vi->mergeable_rx_bufs)
3192 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
3193#endif
0f13b66b
ZYW
3194 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
3195 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
986a4f4d 3196
16032be5
NA
3197 virtnet_init_settings(dev);
3198
ba5e4426
SS
3199 if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
3200 vi->failover = net_failover_create(vi->dev);
4b8e6ac4
WY
3201 if (IS_ERR(vi->failover)) {
3202 err = PTR_ERR(vi->failover);
ba5e4426 3203 goto free_vqs;
4b8e6ac4 3204 }
ba5e4426
SS
3205 }
3206
296f96fc
RR
3207 err = register_netdev(dev);
3208 if (err) {
3209 pr_debug("virtio_net: registering device failed\n");
ba5e4426 3210 goto free_failover;
296f96fc 3211 }
b3369c1f 3212
4baf1e33
MT
3213 virtio_device_ready(vdev);
3214
8017c279 3215 err = virtnet_cpu_notif_add(vi);
8de4b2f3
WG
3216 if (err) {
3217 pr_debug("virtio_net: registering cpu notifier failed\n");
f00e35e2 3218 goto free_unregister_netdev;
8de4b2f3
WG
3219 }
3220
a220871b 3221 virtnet_set_queues(vi, vi->curr_queue_pairs);
44900010 3222
167c25e4
JW
 3223 /* Assume link up if device can't report link status,
 3224  * otherwise get link status from config. */
bda7fab5 3225 netif_carrier_off(dev);
167c25e4 3226 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
3b07e9ca 3227 schedule_work(&vi->config_work);
167c25e4
JW
3228 } else {
3229 vi->status = VIRTIO_NET_S_LINK_UP;
faa9b39f 3230 virtnet_update_settings(vi);
167c25e4
JW
3231 netif_carrier_on(dev);
3232 }
9f4d26d0 3233
3f93522f
JW
3234 for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
3235 if (virtio_has_feature(vi->vdev, guest_offloads[i]))
3236 set_bit(guest_offloads[i], &vi->guest_offloads);
a02e8964 3237 vi->guest_offloads_capable = vi->guest_offloads;
3f93522f 3238
986a4f4d
JW
3239 pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
3240 dev->name, max_queue_pairs);
3241
296f96fc
RR
3242 return 0;
3243
f00e35e2 3244free_unregister_netdev:
02465555
MT
3245 vi->vdev->config->reset(vdev);
3246
b3369c1f 3247 unregister_netdev(dev);
ba5e4426
SS
3248free_failover:
3249 net_failover_destroy(vi->failover);
d2a7ddda 3250free_vqs:
986a4f4d 3251 cancel_delayed_work_sync(&vi->refill);
fb51879d 3252 free_receive_page_frags(vi);
e9d7417b 3253 virtnet_del_vqs(vi);
296f96fc
RR
3254free:
3255 free_netdev(dev);
3256 return err;
3257}
3258
04486ed0 3259static void remove_vq_common(struct virtnet_info *vi)
296f96fc 3260{
04486ed0 3261 vi->vdev->config->reset(vi->vdev);
830a8a97
SM
3262
3263 /* Free unused buffers in both send and recv, if any. */
9ab86bbc 3264 free_unused_bufs(vi);
fb6813f4 3265
986a4f4d 3266 free_receive_bufs(vi);
d2a7ddda 3267
fb51879d
MD
3268 free_receive_page_frags(vi);
3269
986a4f4d 3270 virtnet_del_vqs(vi);
04486ed0
AS
3271}
3272
8cc085d6 3273static void virtnet_remove(struct virtio_device *vdev)
04486ed0
AS
3274{
3275 struct virtnet_info *vi = vdev->priv;
3276
8017c279 3277 virtnet_cpu_notif_remove(vi);
8de4b2f3 3278
102a2786
MT
3279 /* Make sure no work handler is accessing the device. */
3280 flush_work(&vi->config_work);
586d17c5 3281
04486ed0
AS
3282 unregister_netdev(vi->dev);
3283
ba5e4426
SS
3284 net_failover_destroy(vi->failover);
3285
04486ed0 3286 remove_vq_common(vi);
fb6813f4 3287
74b2553f 3288 free_netdev(vi->dev);
296f96fc
RR
3289}
3290
67a75194 3291static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
0741bcb5
AS
3292{
3293 struct virtnet_info *vi = vdev->priv;
3294
8017c279 3295 virtnet_cpu_notif_remove(vi);
9fe7bfce 3296 virtnet_freeze_down(vdev);
0741bcb5
AS
3297 remove_vq_common(vi);
3298
3299 return 0;
3300}
3301
67a75194 3302static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
0741bcb5
AS
3303{
3304 struct virtnet_info *vi = vdev->priv;
9fe7bfce 3305 int err;
0741bcb5 3306
9fe7bfce 3307 err = virtnet_restore_up(vdev);
0741bcb5
AS
3308 if (err)
3309 return err;
986a4f4d
JW
3310 virtnet_set_queues(vi, vi->curr_queue_pairs);
3311
8017c279 3312 err = virtnet_cpu_notif_add(vi);
3f2869ca
XY
3313 if (err) {
3314 virtnet_freeze_down(vdev);
3315 remove_vq_common(vi);
ec9debbd 3316 return err;
3f2869ca 3317 }
ec9debbd 3318
0741bcb5
AS
3319 return 0;
3320}
0741bcb5 3321
296f96fc
RR
3322static struct virtio_device_id id_table[] = {
3323 { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
3324 { 0 },
3325};
3326
f3358507
MT
3327#define VIRTNET_FEATURES \
3328 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
3329 VIRTIO_NET_F_MAC, \
3330 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
3331 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
3332 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
3333 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
3334 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
3335 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
3336 VIRTIO_NET_F_CTRL_MAC_ADDR, \
faa9b39f 3337 VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
9805069d 3338 VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY
f3358507 3339
c45a6816 3340static unsigned int features[] = {
f3358507
MT
3341 VIRTNET_FEATURES,
3342};
3343
3344static unsigned int features_legacy[] = {
3345 VIRTNET_FEATURES,
3346 VIRTIO_NET_F_GSO,
e7428e95 3347 VIRTIO_F_ANY_LAYOUT,
c45a6816
RR
3348};
3349
22402529 3350static struct virtio_driver virtio_net_driver = {
c45a6816
RR
3351 .feature_table = features,
3352 .feature_table_size = ARRAY_SIZE(features),
f3358507
MT
3353 .feature_table_legacy = features_legacy,
3354 .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
296f96fc
RR
3355 .driver.name = KBUILD_MODNAME,
3356 .driver.owner = THIS_MODULE,
3357 .id_table = id_table,
fe36cbe0 3358 .validate = virtnet_validate,
296f96fc 3359 .probe = virtnet_probe,
8cc085d6 3360 .remove = virtnet_remove,
9f4d26d0 3361 .config_changed = virtnet_config_changed,
89107000 3362#ifdef CONFIG_PM_SLEEP
0741bcb5
AS
3363 .freeze = virtnet_freeze,
3364 .restore = virtnet_restore,
3365#endif
296f96fc
RR
3366};
3367
8017c279
SAS
3368static __init int virtio_net_driver_init(void)
3369{
3370 int ret;
3371
73c1b41e 3372 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
8017c279
SAS
3373 virtnet_cpu_online,
3374 virtnet_cpu_down_prep);
3375 if (ret < 0)
3376 goto out;
3377 virtionet_online = ret;
73c1b41e 3378 ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
8017c279
SAS
3379 NULL, virtnet_cpu_dead);
3380 if (ret)
3381 goto err_dead;
3382
3383 ret = register_virtio_driver(&virtio_net_driver);
3384 if (ret)
3385 goto err_virtio;
3386 return 0;
3387err_virtio:
3388 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
3389err_dead:
3390 cpuhp_remove_multi_state(virtionet_online);
3391out:
3392 return ret;
3393}
3394module_init(virtio_net_driver_init);
3395
3396static __exit void virtio_net_driver_exit(void)
3397{
cfa0ebc9 3398 unregister_virtio_driver(&virtio_net_driver);
8017c279
SAS
3399 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
3400 cpuhp_remove_multi_state(virtionet_online);
8017c279
SAS
3401}
3402module_exit(virtio_net_driver_exit);
296f96fc
RR
3403
3404MODULE_DEVICE_TABLE(virtio, id_table);
3405MODULE_DESCRIPTION("Virtio network driver");
3406MODULE_LICENSE("GPL");