virtio-net: restrict build_skb() use to some arches
[linux-block.git] / drivers / net / virtio_net.c
1ccea77e 1// SPDX-License-Identifier: GPL-2.0-or-later
48925e37 2/* A network driver using virtio.
296f96fc
RR
3 *
4 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
296f96fc
RR
5 */
6//#define DEBUG
7#include <linux/netdevice.h>
8#include <linux/etherdevice.h>
a9ea3fc6 9#include <linux/ethtool.h>
296f96fc
RR
10#include <linux/module.h>
11#include <linux/virtio.h>
12#include <linux/virtio_net.h>
f600b690 13#include <linux/bpf.h>
a67edbf4 14#include <linux/bpf_trace.h>
296f96fc 15#include <linux/scatterlist.h>
e918085a 16#include <linux/if_vlan.h>
5a0e3ad6 17#include <linux/slab.h>
8de4b2f3 18#include <linux/cpu.h>
ab7db917 19#include <linux/average.h>
186b3c99 20#include <linux/filter.h>
2ca653d6 21#include <linux/kernel.h>
d85b758f 22#include <net/route.h>
754b8a21 23#include <net/xdp.h>
ba5e4426 24#include <net/net_failover.h>
296f96fc 25
d34710e3 26static int napi_weight = NAPI_POLL_WEIGHT;
6c0cd7c0
DL
27module_param(napi_weight, int, 0444);
28
31c03aef 29static bool csum = true, gso = true, napi_tx = true;
34a48579
RR
30module_param(csum, bool, 0444);
31module_param(gso, bool, 0444);
b92f1e67 32module_param(napi_tx, bool, 0644);
34a48579 33
296f96fc 34/* FIXME: MTU in config. */
5061de36 35#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
3f2c31d9 36#define GOOD_COPY_LEN 128
296f96fc 37
f6b10209
JW
38#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
39
2de2f7f4
JF
40/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
41#define VIRTIO_XDP_HEADROOM 256
42
2471c75e
JDB
43/* Flags distinguishing the two types of XDP xmit */
44#define VIRTIO_XDP_TX BIT(0)
45#define VIRTIO_XDP_REDIR BIT(1)
46
5050471d
TM
47#define VIRTIO_XDP_FLAG BIT(0)
48
5377d758
JB
49/* RX packet size EWMA. The average packet size is used to determine the packet
50 * buffer size when refilling RX rings. As the entire RX ring may be refilled
51 * at once, the weight is chosen so that the EWMA will be insensitive to short-
52 * term, transient changes in packet size.
ab7db917 53 */
eb1e011a 54DECLARE_EWMA(pkt_len, 0, 64)
ab7db917 55
66846048 56#define VIRTNET_DRIVER_VERSION "1.0.0"
2a41f71d 57
7acd4329
CIK
58static const unsigned long guest_offloads[] = {
59 VIRTIO_NET_F_GUEST_TSO4,
60 VIRTIO_NET_F_GUEST_TSO6,
61 VIRTIO_NET_F_GUEST_ECN,
e59ff2c4
JW
62 VIRTIO_NET_F_GUEST_UFO,
63 VIRTIO_NET_F_GUEST_CSUM
7acd4329 64};
3f93522f 65
1a03b8a3
TZ
66#define GUEST_OFFLOAD_LRO_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
67 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
68 (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
69 (1ULL << VIRTIO_NET_F_GUEST_UFO))
70
d7dfc5cf
TM
71struct virtnet_stat_desc {
72 char desc[ETH_GSTRING_LEN];
73 size_t offset;
3fa2a1df 74};
75
d7dfc5cf
TM
76struct virtnet_sq_stats {
77 struct u64_stats_sync syncp;
78 u64 packets;
79 u64 bytes;
5b8f3c8d
TM
80 u64 xdp_tx;
81 u64 xdp_tx_drops;
461f03dc 82 u64 kicks;
d7dfc5cf
TM
83};
84
d46eeeaf
JW
85struct virtnet_rq_stats {
86 struct u64_stats_sync syncp;
d7dfc5cf
TM
87 u64 packets;
88 u64 bytes;
2c4a2f7d 89 u64 drops;
5b8f3c8d
TM
90 u64 xdp_packets;
91 u64 xdp_tx;
92 u64 xdp_redirects;
93 u64 xdp_drops;
461f03dc 94 u64 kicks;
d7dfc5cf
TM
95};
96
97#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
d46eeeaf 98#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m)
d7dfc5cf
TM
99
100static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
5b8f3c8d
TM
101 { "packets", VIRTNET_SQ_STAT(packets) },
102 { "bytes", VIRTNET_SQ_STAT(bytes) },
103 { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) },
104 { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) },
461f03dc 105 { "kicks", VIRTNET_SQ_STAT(kicks) },
d7dfc5cf
TM
106};
107
108static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
5b8f3c8d
TM
109 { "packets", VIRTNET_RQ_STAT(packets) },
110 { "bytes", VIRTNET_RQ_STAT(bytes) },
111 { "drops", VIRTNET_RQ_STAT(drops) },
112 { "xdp_packets", VIRTNET_RQ_STAT(xdp_packets) },
113 { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) },
114 { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) },
115 { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) },
461f03dc 116 { "kicks", VIRTNET_RQ_STAT(kicks) },
d7dfc5cf
TM
117};
118
119#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc)
120#define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc)
121
e9d7417b
JW
122/* Internal representation of a send virtqueue */
123struct send_queue {
124 /* Virtqueue associated with this send_queue */
125 struct virtqueue *vq;
126
127 /* TX: fragments + linear part + virtio header */
128 struct scatterlist sg[MAX_SKB_FRAGS + 2];
986a4f4d
JW
129
130 /* Name of the send queue: output.$index */
131 char name[40];
b92f1e67 132
d7dfc5cf
TM
133 struct virtnet_sq_stats stats;
134
b92f1e67 135 struct napi_struct napi;
e9d7417b
JW
136};
137
138/* Internal representation of a receive virtqueue */
139struct receive_queue {
140 /* Virtqueue associated with this receive_queue */
141 struct virtqueue *vq;
142
296f96fc
RR
143 struct napi_struct napi;
144
f600b690
JF
145 struct bpf_prog __rcu *xdp_prog;
146
d7dfc5cf
TM
147 struct virtnet_rq_stats stats;
148
e9d7417b
JW
149 /* Chain pages by the private ptr. */
150 struct page *pages;
151
ab7db917 152 /* Average packet length for mergeable receive buffers. */
5377d758 153 struct ewma_pkt_len mrg_avg_pkt_len;
ab7db917 154
fb51879d
MD
155 /* Page frag for packet buffer allocation. */
156 struct page_frag alloc_frag;
157
e9d7417b
JW
158 /* RX: fragments + linear part + virtio header */
159 struct scatterlist sg[MAX_SKB_FRAGS + 2];
986a4f4d 160
d85b758f
MT
161 /* Min single buffer size for mergeable buffers case. */
162 unsigned int min_buf_len;
163
986a4f4d
JW
164 /* Name of this receive queue: input.$index */
165 char name[40];
754b8a21
JDB
166
167 struct xdp_rxq_info xdp_rxq;
e9d7417b
JW
168};
169
12e57169
MT
170/* Control VQ buffers: protected by the rtnl lock */
171struct control_buf {
172 struct virtio_net_ctrl_hdr hdr;
173 virtio_net_ctrl_ack status;
174 struct virtio_net_ctrl_mq mq;
175 u8 promisc;
176 u8 allmulti;
d7fad4c8 177 __virtio16 vid;
f4ee703a 178 __virtio64 offloads;
12e57169
MT
179};
180
e9d7417b
JW
181struct virtnet_info {
182 struct virtio_device *vdev;
183 struct virtqueue *cvq;
184 struct net_device *dev;
986a4f4d
JW
185 struct send_queue *sq;
186 struct receive_queue *rq;
e9d7417b
JW
187 unsigned int status;
188
986a4f4d
JW
189 /* Max # of queue pairs supported by the device */
190 u16 max_queue_pairs;
191
192 /* # of queue pairs currently used by the driver */
193 u16 curr_queue_pairs;
194
672aafd5
JF
195 /* # of XDP queue pairs currently used by the driver */
196 u16 xdp_queue_pairs;
197
97c2c69e
XZ
198 /* xdp_queue_pairs may be 0 even when XDP is loaded, so track XDP enablement separately. */
199 bool xdp_enabled;
200
97402b96
HX
201 /* I like... big packets and I cannot lie! */
202 bool big_packets;
203
3f2c31d9
MM
204 /* Host will merge rx buffers for big packets (shake it! shake it!) */
205 bool mergeable_rx_bufs;
206
986a4f4d
JW
207 /* Has control virtqueue */
208 bool has_cvq;
209
e7428e95
MT
210 /* Host can handle any s/g split between our header and packet data */
211 bool any_header_sg;
212
012873d0
MT
213 /* Packet virtio header size */
214 u8 hdr_len;
215
3161e453
RR
216 /* Work struct for refilling if we run low on memory. */
217 struct delayed_work refill;
218
586d17c5
JW
219 /* Work struct for config space updates */
220 struct work_struct config_work;
221
986a4f4d
JW
222 /* Is the affinity hint set for virtqueues? */
223 bool affinity_hint_set;
47be2479 224
8017c279
SAS
225 /* CPU hotplug instances for online & dead */
226 struct hlist_node node;
227 struct hlist_node node_dead;
2ac46030 228
12e57169 229 struct control_buf *ctrl;
16032be5
NA
230
231 /* Ethtool settings */
232 u8 duplex;
233 u32 speed;
3f93522f
JW
234
235 unsigned long guest_offloads;
a02e8964 236 unsigned long guest_offloads_capable;
ba5e4426
SS
237
238 /* failover when STANDBY feature enabled */
239 struct failover *failover;
296f96fc
RR
240};
241
9ab86bbc 242struct padded_vnet_hdr {
012873d0 243 struct virtio_net_hdr_mrg_rxbuf hdr;
9ab86bbc 244 /*
012873d0
MT
245 * hdr is in a separate sg buffer, and data sg buffer shares same page
246 * with this header sg. This padding makes next sg 16 byte aligned
247 * after the header.
9ab86bbc 248 */
012873d0 249 char padding[4];
9ab86bbc
SM
250};
251
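/* Completion tokens on the send virtqueue are either sk_buff or xdp_frame
 * pointers. Both are at least 4-byte aligned, so bit 0 (VIRTIO_XDP_FLAG)
 * is free to tag xdp_frame entries; the helpers below set, test and clear
 * that tag.
 */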
5050471d
TM
252static bool is_xdp_frame(void *ptr)
253{
254 return (unsigned long)ptr & VIRTIO_XDP_FLAG;
255}
256
257static void *xdp_to_ptr(struct xdp_frame *ptr)
258{
259 return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
260}
261
262static struct xdp_frame *ptr_to_xdp(void *ptr)
263{
264 return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
265}
266
986a4f4d
JW
267/* Converting between virtqueue no. and kernel tx/rx queue no.
268 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
269 */
270static int vq2txq(struct virtqueue *vq)
271{
9d0ca6ed 272 return (vq->index - 1) / 2;
986a4f4d
JW
273}
274
275static int txq2vq(int txq)
276{
277 return txq * 2 + 1;
278}
279
280static int vq2rxq(struct virtqueue *vq)
281{
9d0ca6ed 282 return vq->index / 2;
986a4f4d
JW
283}
284
285static int rxq2vq(int rxq)
286{
287 return rxq * 2;
288}
289
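/* The virtio-net header for an skb is stashed in skb->cb, which is large
 * enough to hold struct virtio_net_hdr_mrg_rxbuf.
 */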
012873d0 290static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
296f96fc 291{
012873d0 292 return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
296f96fc
RR
293}
294
9ab86bbc
SM
295/*
296 * page->private is used to chain pages for big packets; put the whole
297 * most recently used list at the front for reuse
298 */
e9d7417b 299static void give_pages(struct receive_queue *rq, struct page *page)
0a888fd1 300{
9ab86bbc 301 struct page *end;
0a888fd1 302
e9d7417b 303 /* Find end of list, sew whole thing into vi->rq.pages. */
9ab86bbc 304 for (end = page; end->private; end = (struct page *)end->private);
e9d7417b
JW
305 end->private = (unsigned long)rq->pages;
306 rq->pages = page;
0a888fd1
MM
307}
308
e9d7417b 309static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
fb6813f4 310{
e9d7417b 311 struct page *p = rq->pages;
fb6813f4 312
9ab86bbc 313 if (p) {
e9d7417b 314 rq->pages = (struct page *)p->private;
9ab86bbc
SM
315 /* clear private here, it is used to chain pages */
316 p->private = 0;
317 } else
fb6813f4
RR
318 p = alloc_page(gfp_mask);
319 return p;
320}
321
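/* NAPI helpers: schedule with virtqueue callbacks (interrupts) disabled,
 * and on completion re-arm the callback, re-scheduling if more work
 * arrived in the meantime.
 */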
e4e8452a
WB
322static void virtqueue_napi_schedule(struct napi_struct *napi,
323 struct virtqueue *vq)
324{
325 if (napi_schedule_prep(napi)) {
326 virtqueue_disable_cb(vq);
327 __napi_schedule(napi);
328 }
329}
330
331static void virtqueue_napi_complete(struct napi_struct *napi,
332 struct virtqueue *vq, int processed)
333{
334 int opaque;
335
336 opaque = virtqueue_enable_cb_prepare(vq);
fdaa767a
TM
337 if (napi_complete_done(napi, processed)) {
338 if (unlikely(virtqueue_poll(vq, opaque)))
339 virtqueue_napi_schedule(napi, vq);
340 } else {
341 virtqueue_disable_cb(vq);
342 }
e4e8452a
WB
343}
344
e9d7417b 345static void skb_xmit_done(struct virtqueue *vq)
296f96fc 346{
e9d7417b 347 struct virtnet_info *vi = vq->vdev->priv;
b92f1e67 348 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
296f96fc 349
2cb9c6ba 350 /* Suppress further interrupts. */
e9d7417b 351 virtqueue_disable_cb(vq);
11a3a154 352
b92f1e67
WB
353 if (napi->weight)
354 virtqueue_napi_schedule(napi, vq);
355 else
356 /* We were probably waiting for more output buffers. */
357 netif_wake_subqueue(vi->dev, vq2txq(vq));
296f96fc
RR
358}
359
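/* For mergeable rx buffers the per-buffer context is an unsigned long that
 * packs the buffer truesize in the low 22 bits and the headroom above them.
 */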
28b39bc7
JW
360#define MRG_CTX_HEADER_SHIFT 22
361static void *mergeable_len_to_ctx(unsigned int truesize,
362 unsigned int headroom)
363{
364 return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
365}
366
367static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
368{
369 return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
370}
371
372static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
373{
374 return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
375}
376
3464645a 377/* Called from bottom half context */
946fa564
MT
378static struct sk_buff *page_to_skb(struct virtnet_info *vi,
379 struct receive_queue *rq,
2613af0e 380 struct page *page, unsigned int offset,
436c9453 381 unsigned int len, unsigned int truesize,
fb32856b
XZ
382 bool hdr_valid, unsigned int metasize,
383 unsigned int headroom)
9ab86bbc
SM
384{
385 struct sk_buff *skb;
012873d0 386 struct virtio_net_hdr_mrg_rxbuf *hdr;
2613af0e 387 unsigned int copy, hdr_len, hdr_padded_len;
fb32856b
XZ
388 int tailroom, shinfo_size;
389 char *p, *hdr_p;
fb6813f4 390
2613af0e 391 p = page_address(page) + offset;
fb32856b 392 hdr_p = p;
3f2c31d9 393
012873d0
MT
394 hdr_len = vi->hdr_len;
395 if (vi->mergeable_rx_bufs)
a4a76503 396 hdr_padded_len = sizeof(*hdr);
012873d0 397 else
2613af0e 398 hdr_padded_len = sizeof(struct padded_vnet_hdr);
3f2c31d9 399
fb32856b
XZ
400 /* If headroom is not 0, there is an offset between the beginning of the
401 * data and the allocated space, otherwise the data and the allocated
402 * space are aligned.
403 */
404 if (headroom) {
405 /* The actual allocated space size is PAGE_SIZE. */
406 truesize = PAGE_SIZE;
407 tailroom = truesize - len - offset;
408 } else {
409 tailroom = truesize - len;
410 }
3f2c31d9 411
9ab86bbc 412 len -= hdr_len;
2613af0e
MD
413 offset += hdr_padded_len;
414 p += hdr_padded_len;
3f2c31d9 415
fb32856b
XZ
416 shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
417
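	/* Reuse the receive buffer directly via build_skb() only when the
	 * arch needs no NET_IP_ALIGN padding, the frame is larger than
	 * GOOD_COPY_LEN and there is tailroom for skb_shared_info;
	 * otherwise fall back to copying into a freshly allocated skb.
	 */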
f5d7872a 418 if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
fb32856b
XZ
419 skb = build_skb(p, truesize);
420 if (unlikely(!skb))
421 return NULL;
422
423 skb_put(skb, len);
424 goto ok;
425 }
426
427 /* copy small packet so we can reuse these pages for small data */
428 skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
429 if (unlikely(!skb))
430 return NULL;
431
0f6925b3
ED
432 /* Copy the whole frame if it fits in skb->head, otherwise
433 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
434 */
435 if (len <= skb_tailroom(skb))
436 copy = len;
437 else
438 copy = ETH_HLEN + metasize;
59ae1d12 439 skb_put_data(skb, p, copy);
3f2c31d9 440
9ab86bbc
SM
441 len -= copy;
442 offset += copy;
3f2c31d9 443
2613af0e
MD
444 if (vi->mergeable_rx_bufs) {
445 if (len)
446 skb_add_rx_frag(skb, 0, page, offset, len, truesize);
447 else
448 put_page(page);
fb32856b 449 goto ok;
2613af0e
MD
450 }
451
e878d78b
SL
452 /*
453 * Verify that we can indeed put this data into a skb.
454 * This is here to handle cases when the device erroneously
455 * tries to receive more than is possible. This is usually
456 * the case of a broken device.
457 */
458 if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
be443899 459 net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
e878d78b
SL
460 dev_kfree_skb(skb);
461 return NULL;
462 }
2613af0e 463 BUG_ON(offset >= PAGE_SIZE);
9ab86bbc 464 while (len) {
2613af0e
MD
465 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
466 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
467 frag_size, truesize);
468 len -= frag_size;
9ab86bbc
SM
469 page = (struct page *)page->private;
470 offset = 0;
471 }
3f2c31d9 472
9ab86bbc 473 if (page)
e9d7417b 474 give_pages(rq, page);
3f2c31d9 475
fb32856b
XZ
476ok:
477 /* hdr_valid means no XDP, so we can copy the vnet header */
478 if (hdr_valid) {
479 hdr = skb_vnet_hdr(skb);
480 memcpy(hdr, hdr_p, hdr_len);
481 }
482
483 if (metasize) {
484 __skb_pull(skb, metasize);
485 skb_metadata_set(skb, metasize);
486 }
487
9ab86bbc
SM
488 return skb;
489}
3f2c31d9 490
735fc405
JDB
491static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
492 struct send_queue *sq,
493 struct xdp_frame *xdpf)
56434a01 494{
56434a01 495 struct virtio_net_hdr_mrg_rxbuf *hdr;
56434a01
JF
496 int err;
497
cac320c8
JDB
498 if (unlikely(xdpf->headroom < vi->hdr_len))
499 return -EOVERFLOW;
500
501 /* Make room for virtqueue hdr (also change xdpf->headroom?) */
502 xdpf->data -= vi->hdr_len;
f6b10209 503 /* Zero header and leave csum up to XDP layers */
cac320c8 504 hdr = xdpf->data;
f6b10209 505 memset(hdr, 0, vi->hdr_len);
cac320c8 506 xdpf->len += vi->hdr_len;
bb91accf 507
cac320c8 508 sg_init_one(sq->sg, xdpf->data, xdpf->len);
bb91accf 509
5050471d
TM
510 err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
511 GFP_ATOMIC);
11b7d897 512 if (unlikely(err))
cac320c8 513 return -ENOSPC; /* Caller handles free/refcnt */
56434a01 514
cac320c8 515 return 0;
56434a01
JF
516}
517
97c2c69e
XZ
518/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
519 * the current cpu, so it does not need to be locked.
520 *
521 * Here we use a macro instead of inline functions because we have to deal with
522 * three issues at the same time: 1. the choice of sq, 2. deciding whether to
523 * lock/unlock the txq, and 3. keeping sparse happy. It is difficult for two
524 * inline functions to solve all three problems at the same time.
525 */
526#define virtnet_xdp_get_sq(vi) ({ \
527 struct netdev_queue *txq; \
528 typeof(vi) v = (vi); \
529 unsigned int qp; \
530 \
531 if (v->curr_queue_pairs > nr_cpu_ids) { \
532 qp = v->curr_queue_pairs - v->xdp_queue_pairs; \
533 qp += smp_processor_id(); \
534 txq = netdev_get_tx_queue(v->dev, qp); \
535 __netif_tx_acquire(txq); \
536 } else { \
537 qp = smp_processor_id() % v->curr_queue_pairs; \
538 txq = netdev_get_tx_queue(v->dev, qp); \
539 __netif_tx_lock(txq, raw_smp_processor_id()); \
540 } \
541 v->sq + qp; \
542})
543
544#define virtnet_xdp_put_sq(vi, q) { \
545 struct netdev_queue *txq; \
546 typeof(vi) v = (vi); \
547 \
548 txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
549 if (v->curr_queue_pairs > nr_cpu_ids) \
550 __netif_tx_release(txq); \
551 else \
552 __netif_tx_unlock(txq); \
2a43565c
TM
553}
554
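/* Illustrative pairing of the two macros above (virtnet_xdp_xmit() below
 * follows this pattern):
 *
 *	sq = virtnet_xdp_get_sq(vi);
 *	... add xdp_frames to sq->vq and kick ...
 *	virtnet_xdp_put_sq(vi, sq);
 *
 * get/put must always be paired so the txq lock state stays balanced.
 */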
735fc405 555static int virtnet_xdp_xmit(struct net_device *dev,
42b33468 556 int n, struct xdp_frame **frames, u32 flags)
186b3c99
JW
557{
558 struct virtnet_info *vi = netdev_priv(dev);
8dcc5b0a
JDB
559 struct receive_queue *rq = vi->rq;
560 struct bpf_prog *xdp_prog;
735fc405
JDB
561 struct send_queue *sq;
562 unsigned int len;
546f2897
TM
563 int packets = 0;
564 int bytes = 0;
fdc13979 565 int nxmit = 0;
461f03dc 566 int kicks = 0;
5050471d 567 void *ptr;
fdc13979 568 int ret;
735fc405
JDB
569 int i;
570
8dcc5b0a
JDB
571 /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
572 * indicates XDP resources have been successfully allocated.
573 */
9719c6b9 574 xdp_prog = rcu_access_pointer(rq->xdp_prog);
1667c08a
TM
575 if (!xdp_prog)
576 return -ENXIO;
577
97c2c69e 578 sq = virtnet_xdp_get_sq(vi);
1667c08a
TM
579
580 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
581 ret = -EINVAL;
5b8f3c8d
TM
582 goto out;
583 }
8dcc5b0a 584
735fc405 585 /* Free up any pending old buffers before queueing new ones. */
5050471d 586 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
546f2897
TM
587 if (likely(is_xdp_frame(ptr))) {
588 struct xdp_frame *frame = ptr_to_xdp(ptr);
589
590 bytes += frame->len;
591 xdp_return_frame(frame);
592 } else {
593 struct sk_buff *skb = ptr;
594
595 bytes += skb->len;
596 napi_consume_skb(skb, false);
597 }
598 packets++;
5050471d 599 }
735fc405
JDB
600
601 for (i = 0; i < n; i++) {
602 struct xdp_frame *xdpf = frames[i];
603
fdc13979
LB
604 if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
605 break;
606 nxmit++;
735fc405 607 }
fdc13979 608 ret = nxmit;
5d274cb4 609
461f03dc
TM
610 if (flags & XDP_XMIT_FLUSH) {
611 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
612 kicks = 1;
613 }
5b8f3c8d
TM
614out:
615 u64_stats_update_begin(&sq->stats.syncp);
546f2897
TM
616 sq->stats.bytes += bytes;
617 sq->stats.packets += packets;
5b8f3c8d 618 sq->stats.xdp_tx += n;
fdc13979 619 sq->stats.xdp_tx_drops += n - nxmit;
461f03dc 620 sq->stats.kicks += kicks;
5b8f3c8d 621 u64_stats_update_end(&sq->stats.syncp);
5d274cb4 622
97c2c69e 623 virtnet_xdp_put_sq(vi, sq);
5b8f3c8d 624 return ret;
186b3c99
JW
625}
626
f6b10209
JW
627static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
628{
97c2c69e 629 return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
f6b10209
JW
630}
631
4941d472
JW
632/* We copy the packet for XDP in the following cases:
633 *
634 * 1) Packet is scattered across multiple rx buffers.
635 * 2) Headroom space is insufficient.
636 *
637 * This is inefficient but it's a temporary condition that
638 * we hit right after XDP is enabled and until the queue is refilled
639 * with large buffers with sufficient headroom - so it should affect
640 * at most queue size packets.
641 * Afterwards, the conditions to enable
642 * XDP should preclude the underlying device from sending packets
643 * across multiple buffers (num_buf > 1), and we make sure buffers
644 * have enough headroom.
645 */
646static struct page *xdp_linearize_page(struct receive_queue *rq,
647 u16 *num_buf,
648 struct page *p,
649 int offset,
650 int page_off,
651 unsigned int *len)
652{
653 struct page *page = alloc_page(GFP_ATOMIC);
654
655 if (!page)
656 return NULL;
657
658 memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
659 page_off += *len;
660
661 while (--*num_buf) {
3cc81a9a 662 int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4941d472
JW
663 unsigned int buflen;
664 void *buf;
665 int off;
666
667 buf = virtqueue_get_buf(rq->vq, &buflen);
668 if (unlikely(!buf))
669 goto err_buf;
670
671 p = virt_to_head_page(buf);
672 off = buf - page_address(p);
673
674 /* guard against a misconfigured or uncooperative backend that
675 * is sending packets larger than the MTU.
676 */
3cc81a9a 677 if ((page_off + buflen + tailroom) > PAGE_SIZE) {
4941d472
JW
678 put_page(p);
679 goto err_buf;
680 }
681
682 memcpy(page_address(page) + page_off,
683 page_address(p) + off, buflen);
684 page_off += buflen;
685 put_page(p);
686 }
687
688 /* Headroom does not contribute to packet length */
689 *len = page_off - VIRTIO_XDP_HEADROOM;
690 return page;
691err_buf:
692 __free_pages(page, 0);
693 return NULL;
694}
695
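/* Receive path for small packets (neither big nor mergeable buffers):
 * optionally run the XDP program on the linear buffer, then build an skb
 * around the page-frag buffer.
 */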
bb91accf
JW
696static struct sk_buff *receive_small(struct net_device *dev,
697 struct virtnet_info *vi,
698 struct receive_queue *rq,
192f68cf 699 void *buf, void *ctx,
186b3c99 700 unsigned int len,
7d9d60fd 701 unsigned int *xdp_xmit,
d46eeeaf 702 struct virtnet_rq_stats *stats)
f121159d 703{
f6b10209 704 struct sk_buff *skb;
bb91accf 705 struct bpf_prog *xdp_prog;
4941d472 706 unsigned int xdp_headroom = (unsigned long)ctx;
f6b10209
JW
707 unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
708 unsigned int headroom = vi->hdr_len + header_offset;
709 unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
710 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4941d472 711 struct page *page = virt_to_head_page(buf);
11b7d897 712 unsigned int delta = 0;
4941d472 713 struct page *xdp_page;
11b7d897 714 int err;
503d539a 715 unsigned int metasize = 0;
11b7d897 716
012873d0 717 len -= vi->hdr_len;
d46eeeaf 718 stats->bytes += len;
f121159d 719
bb91accf
JW
720 rcu_read_lock();
721 xdp_prog = rcu_dereference(rq->xdp_prog);
722 if (xdp_prog) {
f6b10209 723 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
44fa2dbd 724 struct xdp_frame *xdpf;
0354e4d1 725 struct xdp_buff xdp;
f6b10209 726 void *orig_data;
bb91accf
JW
727 u32 act;
728
95dbe9e7 729 if (unlikely(hdr->hdr.gso_type))
bb91accf 730 goto err_xdp;
0354e4d1 731
4941d472
JW
732 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
733 int offset = buf - page_address(page) + header_offset;
734 unsigned int tlen = len + vi->hdr_len;
735 u16 num_buf = 1;
736
737 xdp_headroom = virtnet_get_headroom(vi);
738 header_offset = VIRTNET_RX_PAD + xdp_headroom;
739 headroom = vi->hdr_len + header_offset;
740 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
741 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
742 xdp_page = xdp_linearize_page(rq, &num_buf, page,
743 offset, header_offset,
744 &tlen);
745 if (!xdp_page)
746 goto err_xdp;
747
748 buf = page_address(xdp_page);
749 put_page(page);
750 page = xdp_page;
751 }
752
43b5169d 753 xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
be9df4af
LB
754 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
755 xdp_headroom, len, true);
f6b10209 756 orig_data = xdp.data;
0354e4d1 757 act = bpf_prog_run_xdp(xdp_prog, &xdp);
d46eeeaf 758 stats->xdp_packets++;
0354e4d1 759
bb91accf
JW
760 switch (act) {
761 case XDP_PASS:
2de2f7f4 762 /* Recalculate length in case bpf program changed it */
f6b10209 763 delta = orig_data - xdp.data;
6870de43 764 len = xdp.data_end - xdp.data;
503d539a 765 metasize = xdp.data - xdp.data_meta;
bb91accf
JW
766 break;
767 case XDP_TX:
d46eeeaf 768 stats->xdp_tx++;
1b698fa5 769 xdpf = xdp_convert_buff_to_frame(&xdp);
44fa2dbd
JDB
770 if (unlikely(!xdpf))
771 goto err_xdp;
ca9e83b4 772 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
fdc13979
LB
773 if (unlikely(!err)) {
774 xdp_return_frame_rx_napi(xdpf);
775 } else if (unlikely(err < 0)) {
0354e4d1 776 trace_xdp_exception(vi->dev, xdp_prog, act);
11b7d897
JDB
777 goto err_xdp;
778 }
2471c75e 779 *xdp_xmit |= VIRTIO_XDP_TX;
186b3c99
JW
780 rcu_read_unlock();
781 goto xdp_xmit;
782 case XDP_REDIRECT:
d46eeeaf 783 stats->xdp_redirects++;
186b3c99 784 err = xdp_do_redirect(dev, &xdp, xdp_prog);
11b7d897
JDB
785 if (err)
786 goto err_xdp;
2471c75e 787 *xdp_xmit |= VIRTIO_XDP_REDIR;
bb91accf
JW
788 rcu_read_unlock();
789 goto xdp_xmit;
bb91accf 790 default:
0354e4d1 791 bpf_warn_invalid_xdp_action(act);
df561f66 792 fallthrough;
0354e4d1
JF
793 case XDP_ABORTED:
794 trace_xdp_exception(vi->dev, xdp_prog, act);
95efabf0 795 goto err_xdp;
0354e4d1 796 case XDP_DROP:
bb91accf
JW
797 goto err_xdp;
798 }
799 }
800 rcu_read_unlock();
801
f6b10209
JW
802 skb = build_skb(buf, buflen);
803 if (!skb) {
4941d472 804 put_page(page);
f6b10209
JW
805 goto err;
806 }
807 skb_reserve(skb, headroom - delta);
6870de43 808 skb_put(skb, len);
f1d4884d 809 if (!xdp_prog) {
f6b10209
JW
810 buf += header_offset;
811 memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
f1d4884d 812 } /* keep zeroed vnet hdr since XDP is loaded */
f6b10209 813
503d539a
YK
814 if (metasize)
815 skb_metadata_set(skb, metasize);
816
f6b10209 817err:
f121159d 818 return skb;
bb91accf
JW
819
820err_xdp:
821 rcu_read_unlock();
d46eeeaf
JW
822 stats->xdp_drops++;
823 stats->drops++;
4941d472 824 put_page(page);
bb91accf
JW
825xdp_xmit:
826 return NULL;
f121159d
MT
827}
828
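/* Receive path for "big packets": the frame sits in a chain of pages that
 * page_to_skb() turns into an skb with frags.
 */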
829static struct sk_buff *receive_big(struct net_device *dev,
946fa564 830 struct virtnet_info *vi,
f121159d
MT
831 struct receive_queue *rq,
832 void *buf,
7d9d60fd 833 unsigned int len,
d46eeeaf 834 struct virtnet_rq_stats *stats)
f121159d
MT
835{
836 struct page *page = buf;
503d539a 837 struct sk_buff *skb =
fb32856b 838 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0, 0);
f600b690 839
d46eeeaf 840 stats->bytes += len - vi->hdr_len;
f121159d
MT
841 if (unlikely(!skb))
842 goto err;
843
844 return skb;
845
846err:
d46eeeaf 847 stats->drops++;
f121159d
MT
848 give_pages(rq, page);
849 return NULL;
850}
851
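/* Receive path for mergeable rx buffers: a frame may span several buffers
 * (hdr->num_buffers of them). Run XDP on the (possibly linearized) head
 * buffer, then stitch the remaining buffers into the skb as frags.
 */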
8fc3b9e9 852static struct sk_buff *receive_mergeable(struct net_device *dev,
fdd819b2 853 struct virtnet_info *vi,
8fc3b9e9 854 struct receive_queue *rq,
680557cf
MT
855 void *buf,
856 void *ctx,
186b3c99 857 unsigned int len,
7d9d60fd 858 unsigned int *xdp_xmit,
d46eeeaf 859 struct virtnet_rq_stats *stats)
9ab86bbc 860{
012873d0
MT
861 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
862 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
8fc3b9e9
MT
863 struct page *page = virt_to_head_page(buf);
864 int offset = buf - page_address(page);
f600b690
JF
865 struct sk_buff *head_skb, *curr_skb;
866 struct bpf_prog *xdp_prog;
9ce6146e 867 unsigned int truesize = mergeable_ctx_to_truesize(ctx);
4941d472 868 unsigned int headroom = mergeable_ctx_to_headroom(ctx);
503d539a 869 unsigned int metasize = 0;
9ce6146e
JDB
870 unsigned int frame_sz;
871 int err;
f600b690 872
56434a01 873 head_skb = NULL;
d46eeeaf 874 stats->bytes += len - vi->hdr_len;
56434a01 875
f600b690
JF
876 rcu_read_lock();
877 xdp_prog = rcu_dereference(rq->xdp_prog);
878 if (xdp_prog) {
44fa2dbd 879 struct xdp_frame *xdpf;
72979a6c 880 struct page *xdp_page;
0354e4d1 881 struct xdp_buff xdp;
0354e4d1 882 void *data;
f600b690
JF
883 u32 act;
884
3d62b2a0
JW
885 /* Transient failure which in theory could occur if
886 * in-flight packets from before XDP was enabled reach
887 * the receive path after XDP is loaded.
888 */
889 if (unlikely(hdr->hdr.gso_type))
890 goto err_xdp;
891
9ce6146e
JDB
892 /* Buffers with headroom use PAGE_SIZE as alloc size,
893 * see add_recvbuf_mergeable() + get_mergeable_buf_len()
894 */
895 frame_sz = headroom ? PAGE_SIZE : truesize;
896
3cc81a9a
JW
897 /* This happens when rx buffer size is underestimated
898 * or headroom is not enough because the buffer
899 * was refilled before XDP was set. This should only
900 * happen for the first several packets, so we don't
901 * care much about its performance.
902 */
4941d472
JW
903 if (unlikely(num_buf > 1 ||
904 headroom < virtnet_get_headroom(vi))) {
72979a6c 905 /* linearize data for XDP */
56a86f84 906 xdp_page = xdp_linearize_page(rq, &num_buf,
4941d472
JW
907 page, offset,
908 VIRTIO_XDP_HEADROOM,
909 &len);
9ce6146e
JDB
910 frame_sz = PAGE_SIZE;
911
72979a6c
JF
912 if (!xdp_page)
913 goto err_xdp;
2de2f7f4 914 offset = VIRTIO_XDP_HEADROOM;
72979a6c
JF
915 } else {
916 xdp_page = page;
f600b690
JF
917 }
918
2de2f7f4
JF
919 /* Allow consuming headroom but reserve enough space to push
920 * the descriptor on if we get an XDP_TX return code.
921 */
0354e4d1 922 data = page_address(xdp_page) + offset;
43b5169d 923 xdp_init_buff(&xdp, frame_sz - vi->hdr_len, &rq->xdp_rxq);
be9df4af
LB
924 xdp_prepare_buff(&xdp, data - VIRTIO_XDP_HEADROOM + vi->hdr_len,
925 VIRTIO_XDP_HEADROOM, len - vi->hdr_len, true);
754b8a21 926
0354e4d1 927 act = bpf_prog_run_xdp(xdp_prog, &xdp);
d46eeeaf 928 stats->xdp_packets++;
0354e4d1 929
56434a01
JF
930 switch (act) {
931 case XDP_PASS:
503d539a
YK
932 metasize = xdp.data - xdp.data_meta;
933
2de2f7f4 934 /* recalculate offset to account for any header
503d539a
YK
935 * adjustments, minus the metasize, so that the metadata is
936 * copied in page_to_skb(). Note other cases do not
937 * build an skb and avoid using offset
2de2f7f4 938 */
503d539a
YK
939 offset = xdp.data - page_address(xdp_page) -
940 vi->hdr_len - metasize;
2de2f7f4 941
503d539a
YK
942 /* recalculate len if xdp.data, xdp.data_end or
943 * xdp.data_meta were adjusted
6870de43 944 */
503d539a 945 len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
1830f893
JW
946 /* We can only create skb based on xdp_page. */
947 if (unlikely(xdp_page != page)) {
948 rcu_read_unlock();
949 put_page(page);
503d539a
YK
950 head_skb = page_to_skb(vi, rq, xdp_page, offset,
951 len, PAGE_SIZE, false,
fb32856b 952 metasize, headroom);
1830f893
JW
953 return head_skb;
954 }
56434a01
JF
955 break;
956 case XDP_TX:
d46eeeaf 957 stats->xdp_tx++;
1b698fa5 958 xdpf = xdp_convert_buff_to_frame(&xdp);
44fa2dbd
JDB
959 if (unlikely(!xdpf))
960 goto err_xdp;
ca9e83b4 961 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
fdc13979
LB
962 if (unlikely(!err)) {
963 xdp_return_frame_rx_napi(xdpf);
964 } else if (unlikely(err < 0)) {
0354e4d1 965 trace_xdp_exception(vi->dev, xdp_prog, act);
11b7d897
JDB
966 if (unlikely(xdp_page != page))
967 put_page(xdp_page);
968 goto err_xdp;
969 }
2471c75e 970 *xdp_xmit |= VIRTIO_XDP_TX;
72979a6c 971 if (unlikely(xdp_page != page))
5d458a13 972 put_page(page);
56434a01
JF
973 rcu_read_unlock();
974 goto xdp_xmit;
3cc81a9a 975 case XDP_REDIRECT:
d46eeeaf 976 stats->xdp_redirects++;
3cc81a9a
JW
977 err = xdp_do_redirect(dev, &xdp, xdp_prog);
978 if (err) {
979 if (unlikely(xdp_page != page))
980 put_page(xdp_page);
981 goto err_xdp;
982 }
2471c75e 983 *xdp_xmit |= VIRTIO_XDP_REDIR;
3cc81a9a 984 if (unlikely(xdp_page != page))
6890418b 985 put_page(page);
3cc81a9a
JW
986 rcu_read_unlock();
987 goto xdp_xmit;
56434a01 988 default:
0354e4d1 989 bpf_warn_invalid_xdp_action(act);
df561f66 990 fallthrough;
0354e4d1
JF
991 case XDP_ABORTED:
992 trace_xdp_exception(vi->dev, xdp_prog, act);
df561f66 993 fallthrough;
0354e4d1 994 case XDP_DROP:
72979a6c
JF
995 if (unlikely(xdp_page != page))
996 __free_pages(xdp_page, 0);
f600b690 997 goto err_xdp;
56434a01 998 }
f600b690
JF
999 }
1000 rcu_read_unlock();
ab7db917 1001
28b39bc7 1002 if (unlikely(len > truesize)) {
56da5fd0 1003 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
680557cf
MT
1004 dev->name, len, (unsigned long)ctx);
1005 dev->stats.rx_length_errors++;
1006 goto err_skb;
1007 }
28b39bc7 1008
503d539a 1009 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
fb32856b 1010 metasize, headroom);
f600b690 1011 curr_skb = head_skb;
9ab86bbc 1012
8fc3b9e9
MT
1013 if (unlikely(!curr_skb))
1014 goto err_skb;
9ab86bbc 1015 while (--num_buf) {
8fc3b9e9
MT
1016 int num_skb_frags;
1017
680557cf 1018 buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
03e9f8a0 1019 if (unlikely(!buf)) {
8fc3b9e9 1020 pr_debug("%s: rx error: %d buffers out of %d missing\n",
fdd819b2 1021 dev->name, num_buf,
012873d0
MT
1022 virtio16_to_cpu(vi->vdev,
1023 hdr->num_buffers));
8fc3b9e9
MT
1024 dev->stats.rx_length_errors++;
1025 goto err_buf;
3f2c31d9 1026 }
8fc3b9e9 1027
d46eeeaf 1028 stats->bytes += len;
8fc3b9e9 1029 page = virt_to_head_page(buf);
28b39bc7
JW
1030
1031 truesize = mergeable_ctx_to_truesize(ctx);
1032 if (unlikely(len > truesize)) {
56da5fd0 1033 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
680557cf
MT
1034 dev->name, len, (unsigned long)ctx);
1035 dev->stats.rx_length_errors++;
1036 goto err_skb;
1037 }
8fc3b9e9
MT
1038
1039 num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
2613af0e
MD
1040 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
1041 struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
8fc3b9e9
MT
1042
1043 if (unlikely(!nskb))
1044 goto err_skb;
2613af0e
MD
1045 if (curr_skb == head_skb)
1046 skb_shinfo(curr_skb)->frag_list = nskb;
1047 else
1048 curr_skb->next = nskb;
1049 curr_skb = nskb;
1050 head_skb->truesize += nskb->truesize;
1051 num_skb_frags = 0;
1052 }
1053 if (curr_skb != head_skb) {
1054 head_skb->data_len += len;
1055 head_skb->len += len;
fb51879d 1056 head_skb->truesize += truesize;
2613af0e 1057 }
8fc3b9e9 1058 offset = buf - page_address(page);
ba275241
JW
1059 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
1060 put_page(page);
1061 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
fb51879d 1062 len, truesize);
ba275241
JW
1063 } else {
1064 skb_add_rx_frag(curr_skb, num_skb_frags, page,
fb51879d 1065 offset, len, truesize);
ba275241 1066 }
8fc3b9e9
MT
1067 }
1068
5377d758 1069 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
8fc3b9e9
MT
1070 return head_skb;
1071
f600b690
JF
1072err_xdp:
1073 rcu_read_unlock();
d46eeeaf 1074 stats->xdp_drops++;
8fc3b9e9
MT
1075err_skb:
1076 put_page(page);
850e088d 1077 while (num_buf-- > 1) {
680557cf
MT
1078 buf = virtqueue_get_buf(rq->vq, &len);
1079 if (unlikely(!buf)) {
8fc3b9e9
MT
1080 pr_debug("%s: rx error: %d buffers missing\n",
1081 dev->name, num_buf);
1082 dev->stats.rx_length_errors++;
1083 break;
1084 }
d46eeeaf 1085 stats->bytes += len;
680557cf 1086 page = virt_to_head_page(buf);
8fc3b9e9 1087 put_page(page);
9ab86bbc 1088 }
8fc3b9e9 1089err_buf:
d46eeeaf 1090 stats->drops++;
8fc3b9e9 1091 dev_kfree_skb(head_skb);
56434a01 1092xdp_xmit:
8fc3b9e9 1093 return NULL;
9ab86bbc
SM
1094}
1095
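/* Common entry point for a received buffer: validate the length, dispatch
 * to the mergeable/big/small handler, then fill in checksum and GSO state
 * from the virtio-net header and hand the skb to GRO.
 */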
7d9d60fd
TM
1096static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
1097 void *buf, unsigned int len, void **ctx,
a0929a44 1098 unsigned int *xdp_xmit,
d46eeeaf 1099 struct virtnet_rq_stats *stats)
9ab86bbc 1100{
e9d7417b 1101 struct net_device *dev = vi->dev;
9ab86bbc 1102 struct sk_buff *skb;
012873d0 1103 struct virtio_net_hdr_mrg_rxbuf *hdr;
3f2c31d9 1104
bcff3162 1105 if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
9ab86bbc
SM
1106 pr_debug("%s: short packet %i\n", dev->name, len);
1107 dev->stats.rx_length_errors++;
ab7db917 1108 if (vi->mergeable_rx_bufs) {
680557cf 1109 put_page(virt_to_head_page(buf));
ab7db917 1110 } else if (vi->big_packets) {
98bfd23c 1111 give_pages(rq, buf);
ab7db917 1112 } else {
f6b10209 1113 put_page(virt_to_head_page(buf));
ab7db917 1114 }
7d9d60fd 1115 return;
9ab86bbc 1116 }
3f2c31d9 1117
f121159d 1118 if (vi->mergeable_rx_bufs)
7d9d60fd 1119 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
a0929a44 1120 stats);
f121159d 1121 else if (vi->big_packets)
a0929a44 1122 skb = receive_big(dev, vi, rq, buf, len, stats);
f121159d 1123 else
a0929a44 1124 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
f121159d
MT
1125
1126 if (unlikely(!skb))
7d9d60fd 1127 return;
3f2c31d9 1128
9ab86bbc 1129 hdr = skb_vnet_hdr(skb);
3fa2a1df 1130
e858fae2 1131 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
10a8d94a 1132 skb->ip_summed = CHECKSUM_UNNECESSARY;
296f96fc 1133
e858fae2
MR
1134 if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
1135 virtio_is_little_endian(vi->vdev))) {
1136 net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
1137 dev->name, hdr->hdr.gso_type,
1138 hdr->hdr.gso_size);
1139 goto frame_err;
296f96fc
RR
1140 }
1141
133bbb18 1142 skb_record_rx_queue(skb, vq2rxq(rq->vq));
d1dc06dc
MR
1143 skb->protocol = eth_type_trans(skb, dev);
1144 pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
1145 ntohs(skb->protocol), skb->len, skb->pkt_type);
1146
0fbd050a 1147 napi_gro_receive(&rq->napi, skb);
7d9d60fd 1148 return;
296f96fc
RR
1149
1150frame_err:
1151 dev->stats.rx_frame_errors++;
296f96fc
RR
1152 dev_kfree_skb(skb);
1153}
1154
192f68cf
JW
1155/* Unlike mergeable buffers, all buffers are allocated to the
1156 * same size, except for the headroom. For this reason we do
1157 * not need to use mergeable_len_to_ctx here - it is enough
1158 * to store the headroom as the context ignoring the truesize.
1159 */
946fa564
MT
1160static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
1161 gfp_t gfp)
296f96fc 1162{
f6b10209
JW
1163 struct page_frag *alloc_frag = &rq->alloc_frag;
1164 char *buf;
2de2f7f4 1165 unsigned int xdp_headroom = virtnet_get_headroom(vi);
192f68cf 1166 void *ctx = (void *)(unsigned long)xdp_headroom;
f6b10209 1167 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
9ab86bbc 1168 int err;
3f2c31d9 1169
f6b10209
JW
1170 len = SKB_DATA_ALIGN(len) +
1171 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1172 if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
9ab86bbc 1173 return -ENOMEM;
296f96fc 1174
f6b10209
JW
1175 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
1176 get_page(alloc_frag->page);
1177 alloc_frag->offset += len;
1178 sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
1179 vi->hdr_len + GOOD_PACKET_LEN);
192f68cf 1180 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
9ab86bbc 1181 if (err < 0)
f6b10209 1182 put_page(virt_to_head_page(buf));
9ab86bbc
SM
1183 return err;
1184}
97402b96 1185
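/* Post a "big packet" receive buffer: one page shared by the header and the
 * start of the data plus MAX_SKB_FRAGS more pages, all chained through
 * page->private and exposed to the device as MAX_SKB_FRAGS + 2 sg entries.
 */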
012873d0
MT
1186static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
1187 gfp_t gfp)
9ab86bbc 1188{
9ab86bbc
SM
1189 struct page *first, *list = NULL;
1190 char *p;
1191 int i, err, offset;
1192
a5835440
RR
1193 sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
1194
e9d7417b 1195 /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
9ab86bbc 1196 for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
e9d7417b 1197 first = get_a_page(rq, gfp);
9ab86bbc
SM
1198 if (!first) {
1199 if (list)
e9d7417b 1200 give_pages(rq, list);
9ab86bbc 1201 return -ENOMEM;
97402b96 1202 }
e9d7417b 1203 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
97402b96 1204
9ab86bbc
SM
1205 /* chain new page in list head to match sg */
1206 first->private = (unsigned long)list;
1207 list = first;
1208 }
296f96fc 1209
e9d7417b 1210 first = get_a_page(rq, gfp);
9ab86bbc 1211 if (!first) {
e9d7417b 1212 give_pages(rq, list);
9ab86bbc
SM
1213 return -ENOMEM;
1214 }
1215 p = page_address(first);
1216
e9d7417b 1217 /* rq->sg[0], rq->sg[1] share the same page */
012873d0
MT
1218 /* a separated rq->sg[0] for header - required in case !any_header_sg */
1219 sg_set_buf(&rq->sg[0], p, vi->hdr_len);
9ab86bbc 1220
e9d7417b 1221 /* rq->sg[1] for data packet, from offset */
9ab86bbc 1222 offset = sizeof(struct padded_vnet_hdr);
e9d7417b 1223 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
9ab86bbc
SM
1224
1225 /* chain first in list head */
1226 first->private = (unsigned long)list;
9dc7b9e4
RR
1227 err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
1228 first, gfp);
9ab86bbc 1229 if (err < 0)
e9d7417b 1230 give_pages(rq, first);
9ab86bbc
SM
1231
1232 return err;
296f96fc
RR
1233}
1234
d85b758f 1235static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
3cc81a9a
JW
1236 struct ewma_pkt_len *avg_pkt_len,
1237 unsigned int room)
3f2c31d9 1238{
ab7db917 1239 const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
fbf28d78
MD
1240 unsigned int len;
1241
3cc81a9a
JW
1242 if (room)
1243 return PAGE_SIZE - room;
1244
1245 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
f0c3192c 1246 rq->min_buf_len, PAGE_SIZE - hdr_len);
3cc81a9a 1247
e377fcc8 1248 return ALIGN(len, L1_CACHE_BYTES);
fbf28d78
MD
1249}
1250
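/* Post a mergeable receive buffer carved out of a page frag. The buffer
 * length follows the EWMA of recent packet sizes, and truesize/headroom
 * are packed into the opaque ctx for the receive side.
 */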
2de2f7f4
JF
1251static int add_recvbuf_mergeable(struct virtnet_info *vi,
1252 struct receive_queue *rq, gfp_t gfp)
fbf28d78 1253{
fb51879d 1254 struct page_frag *alloc_frag = &rq->alloc_frag;
2de2f7f4 1255 unsigned int headroom = virtnet_get_headroom(vi);
3cc81a9a
JW
1256 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1257 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
fb51879d 1258 char *buf;
680557cf 1259 void *ctx;
3f2c31d9 1260 int err;
fb51879d 1261 unsigned int len, hole;
3f2c31d9 1262
3cc81a9a
JW
1263 /* Extra tailroom is needed to satisfy XDP's assumption. This
1264 * means rx frags coalescing won't work, but consider we've
1265 * disabled GSO for XDP, it won't be a big issue.
1266 */
1267 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
1268 if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
9ab86bbc 1269 return -ENOMEM;
ab7db917 1270
fb51879d 1271 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
2de2f7f4 1272 buf += headroom; /* advance address leaving hole at front of pkt */
fb51879d 1273 get_page(alloc_frag->page);
3cc81a9a 1274 alloc_frag->offset += len + room;
fb51879d 1275 hole = alloc_frag->size - alloc_frag->offset;
3cc81a9a 1276 if (hole < len + room) {
ab7db917
MD
1277 /* To avoid internal fragmentation, if there is very likely not
1278 * enough space for another buffer, add the remaining space to
1daa8790 1279 * the current buffer.
ab7db917 1280 */
fb51879d
MD
1281 len += hole;
1282 alloc_frag->offset += hole;
1283 }
3f2c31d9 1284
fb51879d 1285 sg_init_one(rq->sg, buf, len);
29fda25a 1286 ctx = mergeable_len_to_ctx(len, headroom);
680557cf 1287 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
9ab86bbc 1288 if (err < 0)
2613af0e 1289 put_page(virt_to_head_page(buf));
3f2c31d9 1290
9ab86bbc
SM
1291 return err;
1292}
3f2c31d9 1293
b2baed69
RR
1294/*
1295 * Returns false if we couldn't fill entirely (OOM).
1296 *
1297 * Normally run in the receive path, but can also be run from ndo_open
1298 * before we're receiving packets, or from refill_work which is
1299 * careful to disable receiving (using napi_disable).
1300 */
946fa564
MT
1301static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
1302 gfp_t gfp)
9ab86bbc
SM
1303{
1304 int err;
1788f495 1305 bool oom;
3f2c31d9 1306
9ab86bbc
SM
1307 do {
1308 if (vi->mergeable_rx_bufs)
2de2f7f4 1309 err = add_recvbuf_mergeable(vi, rq, gfp);
9ab86bbc 1310 else if (vi->big_packets)
012873d0 1311 err = add_recvbuf_big(vi, rq, gfp);
9ab86bbc 1312 else
946fa564 1313 err = add_recvbuf_small(vi, rq, gfp);
3f2c31d9 1314
1788f495 1315 oom = err == -ENOMEM;
9ed4cb07 1316 if (err)
3f2c31d9 1317 break;
b7dfde95 1318 } while (rq->vq->num_free);
461f03dc 1319 if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
01c32598
MT
1320 unsigned long flags;
1321
1322 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
d46eeeaf 1323 rq->stats.kicks++;
01c32598 1324 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
461f03dc
TM
1325 }
1326
3161e453 1327 return !oom;
3f2c31d9
MM
1328}
1329
18445c4d 1330static void skb_recv_done(struct virtqueue *rvq)
296f96fc
RR
1331{
1332 struct virtnet_info *vi = rvq->vdev->priv;
986a4f4d 1333 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
e9d7417b 1334
e4e8452a 1335 virtqueue_napi_schedule(&rq->napi, rvq);
296f96fc
RR
1336}
1337
e4e8452a 1338static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
3e9d08ec 1339{
e4e8452a 1340 napi_enable(napi);
3e9d08ec
BR
1341
1342 /* If all buffers were filled by the other side before napi was enabled, we
e4e8452a
WB
1343 * won't get another interrupt, so process any outstanding packets now.
1344 * Call local_bh_enable after to trigger softIRQ processing.
1345 */
1346 local_bh_disable();
1347 virtqueue_napi_schedule(napi, vq);
1348 local_bh_enable();
3e9d08ec
BR
1349}
1350
b92f1e67
WB
1351static void virtnet_napi_tx_enable(struct virtnet_info *vi,
1352 struct virtqueue *vq,
1353 struct napi_struct *napi)
1354{
1355 if (!napi->weight)
1356 return;
1357
1358 /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
1359 * enable the feature if this is likely affine with the transmit path.
1360 */
1361 if (!vi->affinity_hint_set) {
1362 napi->weight = 0;
1363 return;
1364 }
1365
1366 return virtnet_napi_enable(vq, napi);
1367}
1368
78a57b48
WB
1369static void virtnet_napi_tx_disable(struct napi_struct *napi)
1370{
1371 if (napi->weight)
1372 napi_disable(napi);
1373}
1374
3161e453
RR
1375static void refill_work(struct work_struct *work)
1376{
e9d7417b
JW
1377 struct virtnet_info *vi =
1378 container_of(work, struct virtnet_info, refill.work);
3161e453 1379 bool still_empty;
986a4f4d
JW
1380 int i;
1381
55257d72 1382 for (i = 0; i < vi->curr_queue_pairs; i++) {
986a4f4d 1383 struct receive_queue *rq = &vi->rq[i];
3161e453 1384
986a4f4d 1385 napi_disable(&rq->napi);
946fa564 1386 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
e4e8452a 1387 virtnet_napi_enable(rq->vq, &rq->napi);
3161e453 1388
986a4f4d
JW
1389 /* In theory, this can happen: if we don't get any buffers in
1390 * we will *never* try to fill again.
1391 */
1392 if (still_empty)
1393 schedule_delayed_work(&vi->refill, HZ/2);
1394 }
3161e453
RR
1395}
1396
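/* Core NAPI receive loop: pull up to @budget buffers off the rx virtqueue,
 * refill the ring if it is running low, then fold the on-stack stats into
 * rq->stats under the u64_stats sync.
 */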
2471c75e
JDB
1397static int virtnet_receive(struct receive_queue *rq, int budget,
1398 unsigned int *xdp_xmit)
296f96fc 1399{
e9d7417b 1400 struct virtnet_info *vi = rq->vq->vdev->priv;
d46eeeaf 1401 struct virtnet_rq_stats stats = {};
a0929a44 1402 unsigned int len;
9ab86bbc 1403 void *buf;
a0929a44 1404 int i;
296f96fc 1405
192f68cf 1406 if (!vi->big_packets || vi->mergeable_rx_bufs) {
680557cf
MT
1407 void *ctx;
1408
d46eeeaf 1409 while (stats.packets < budget &&
680557cf 1410 (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
a0929a44 1411 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
d46eeeaf 1412 stats.packets++;
680557cf
MT
1413 }
1414 } else {
d46eeeaf 1415 while (stats.packets < budget &&
680557cf 1416 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
a0929a44 1417 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
d46eeeaf 1418 stats.packets++;
680557cf 1419 }
296f96fc
RR
1420 }
1421
718be6ba 1422 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
946fa564 1423 if (!try_fill_recv(vi, rq, GFP_ATOMIC))
3b07e9ca 1424 schedule_delayed_work(&vi->refill, 0);
3161e453 1425 }
296f96fc 1426
d7dfc5cf 1427 u64_stats_update_begin(&rq->stats.syncp);
a0929a44
TM
1428 for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
1429 size_t offset = virtnet_rq_stats_desc[i].offset;
1430 u64 *item;
1431
d46eeeaf
JW
1432 item = (u64 *)((u8 *)&rq->stats + offset);
1433 *item += *(u64 *)((u8 *)&stats + offset);
a0929a44 1434 }
d7dfc5cf 1435 u64_stats_update_end(&rq->stats.syncp);
61845d20 1436
d46eeeaf 1437 return stats.packets;
2ffa7598
JW
1438}
1439
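/* Reclaim completed tx buffers, telling skbs and xdp_frames apart by the
 * pointer tag, and account the freed packets and bytes.
 */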
df133f3f 1440static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
ea7735d9 1441{
ea7735d9 1442 unsigned int len;
ea7735d9
WB
1443 unsigned int packets = 0;
1444 unsigned int bytes = 0;
5050471d 1445 void *ptr;
ea7735d9 1446
5050471d
TM
1447 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
1448 if (likely(!is_xdp_frame(ptr))) {
1449 struct sk_buff *skb = ptr;
ea7735d9 1450
5050471d 1451 pr_debug("Sent skb %p\n", skb);
ea7735d9 1452
5050471d
TM
1453 bytes += skb->len;
1454 napi_consume_skb(skb, in_napi);
1455 } else {
1456 struct xdp_frame *frame = ptr_to_xdp(ptr);
ea7735d9 1457
5050471d
TM
1458 bytes += frame->len;
1459 xdp_return_frame(frame);
1460 }
1461 packets++;
ea7735d9
WB
1462 }
1463
1464 /* Avoid overhead when no packets have been processed; this
1465 * happens when called speculatively from start_xmit.
1466 */
1467 if (!packets)
1468 return;
1469
d7dfc5cf
TM
1470 u64_stats_update_begin(&sq->stats.syncp);
1471 sq->stats.bytes += bytes;
1472 sq->stats.packets += packets;
1473 u64_stats_update_end(&sq->stats.syncp);
ea7735d9
WB
1474}
1475
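/* Queues at the end of the currently used range are reserved for XDP
 * transmit; their buffers are raw xdp_frames rather than skbs.
 */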
534da5e8
TM
1476static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
1477{
1478 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
1479 return false;
1480 else if (q < vi->curr_queue_pairs)
1481 return true;
1482 else
1483 return false;
1484}
1485
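/* Opportunistically clean the tx queue paired with this rx queue from the
 * rx NAPI handler, and wake the tx queue once enough descriptors are free.
 */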
7b0411ef
WB
1486static void virtnet_poll_cleantx(struct receive_queue *rq)
1487{
1488 struct virtnet_info *vi = rq->vq->vdev->priv;
1489 unsigned int index = vq2rxq(rq->vq);
1490 struct send_queue *sq = &vi->sq[index];
1491 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
1492
534da5e8 1493 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
7b0411ef
WB
1494 return;
1495
1496 if (__netif_tx_trylock(txq)) {
df133f3f 1497 free_old_xmit_skbs(sq, true);
7b0411ef
WB
1498 __netif_tx_unlock(txq);
1499 }
1500
1501 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
1502 netif_tx_wake_queue(txq);
1503}
1504
2ffa7598
JW
1505static int virtnet_poll(struct napi_struct *napi, int budget)
1506{
1507 struct receive_queue *rq =
1508 container_of(napi, struct receive_queue, napi);
9267c430
JW
1509 struct virtnet_info *vi = rq->vq->vdev->priv;
1510 struct send_queue *sq;
2a43565c 1511 unsigned int received;
2471c75e 1512 unsigned int xdp_xmit = 0;
2ffa7598 1513
7b0411ef
WB
1514 virtnet_poll_cleantx(rq);
1515
186b3c99 1516 received = virtnet_receive(rq, budget, &xdp_xmit);
2ffa7598 1517
8329d98e 1518 /* Out of packets? */
e4e8452a
WB
1519 if (received < budget)
1520 virtqueue_napi_complete(napi, rq->vq, received);
296f96fc 1521
2471c75e 1522 if (xdp_xmit & VIRTIO_XDP_REDIR)
1d233886 1523 xdp_do_flush();
2471c75e
JDB
1524
1525 if (xdp_xmit & VIRTIO_XDP_TX) {
97c2c69e 1526 sq = virtnet_xdp_get_sq(vi);
461f03dc
TM
1527 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
1528 u64_stats_update_begin(&sq->stats.syncp);
1529 sq->stats.kicks++;
1530 u64_stats_update_end(&sq->stats.syncp);
1531 }
97c2c69e 1532 virtnet_xdp_put_sq(vi, sq);
9267c430 1533 }
186b3c99 1534
296f96fc
RR
1535 return received;
1536}
1537
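/* ndo_open: fill the rx rings (falling back to the refill workqueue on
 * OOM), register xdp_rxq info for each rx queue and enable rx/tx NAPI.
 */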
986a4f4d
JW
1538static int virtnet_open(struct net_device *dev)
1539{
1540 struct virtnet_info *vi = netdev_priv(dev);
754b8a21 1541 int i, err;
986a4f4d 1542
e4166625
JW
1543 for (i = 0; i < vi->max_queue_pairs; i++) {
1544 if (i < vi->curr_queue_pairs)
1545 /* Make sure we have some buffers: if oom use wq. */
946fa564 1546 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
e4166625 1547 schedule_delayed_work(&vi->refill, 0);
754b8a21 1548
b02e5a0e 1549 err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id);
754b8a21
JDB
1550 if (err < 0)
1551 return err;
1552
8d5d8852
JDB
1553 err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
1554 MEM_TYPE_PAGE_SHARED, NULL);
1555 if (err < 0) {
1556 xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
1557 return err;
1558 }
1559
e4e8452a 1560 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
b92f1e67 1561 virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
986a4f4d
JW
1562 }
1563
1564 return 0;
1565}
1566
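/* Tx NAPI handler: free completed skbs under the txq lock and wake the
 * queue when enough ring space is available again.
 */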
b92f1e67
WB
1567static int virtnet_poll_tx(struct napi_struct *napi, int budget)
1568{
1569 struct send_queue *sq = container_of(napi, struct send_queue, napi);
1570 struct virtnet_info *vi = sq->vq->vdev->priv;
534da5e8
TM
1571 unsigned int index = vq2txq(sq->vq);
1572 struct netdev_queue *txq;
b92f1e67 1573
534da5e8
TM
1574 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
1575 /* We don't need to enable cb for XDP */
1576 napi_complete_done(napi, 0);
1577 return 0;
1578 }
1579
1580 txq = netdev_get_tx_queue(vi->dev, index);
b92f1e67 1581 __netif_tx_lock(txq, raw_smp_processor_id());
df133f3f 1582 free_old_xmit_skbs(sq, true);
b92f1e67
WB
1583 __netif_tx_unlock(txq);
1584
1585 virtqueue_napi_complete(napi, sq->vq, 0);
1586
1587 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
1588 netif_tx_wake_queue(txq);
1589
1590 return 0;
1591}
1592
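/* Build the virtio-net header for the skb (pushed into the skb's headroom
 * when any_header_sg allows it, otherwise supplied as a separate sg entry)
 * and add the skb to the send virtqueue.
 */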
e9d7417b 1593static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
296f96fc 1594{
012873d0 1595 struct virtio_net_hdr_mrg_rxbuf *hdr;
296f96fc 1596 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
e9d7417b 1597 struct virtnet_info *vi = sq->vq->vdev->priv;
e2fcad58 1598 int num_sg;
012873d0 1599 unsigned hdr_len = vi->hdr_len;
e7428e95 1600 bool can_push;
296f96fc 1601
e174961c 1602 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
e7428e95
MT
1603
1604 can_push = vi->any_header_sg &&
1605 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
1606 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
1607 /* Even if we can, don't push here yet as this would skew
1608 * csum_start offset below. */
1609 if (can_push)
012873d0 1610 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
e7428e95
MT
1611 else
1612 hdr = skb_vnet_hdr(skb);
296f96fc 1613
e858fae2 1614 if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
fd3a8862
WB
1615 virtio_is_little_endian(vi->vdev), false,
1616 0))
e858fae2 1617 BUG();
296f96fc 1618
3f2c31d9 1619 if (vi->mergeable_rx_bufs)
012873d0 1620 hdr->num_buffers = 0;
3f2c31d9 1621
547c890c 1622 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
e7428e95
MT
1623 if (can_push) {
1624 __skb_push(skb, hdr_len);
1625 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
e2fcad58
JD
1626 if (unlikely(num_sg < 0))
1627 return num_sg;
e7428e95
MT
1628 /* Pull header back to avoid skew in tx bytes calculations. */
1629 __skb_pull(skb, hdr_len);
1630 } else {
1631 sg_set_buf(sq->sg, hdr, hdr_len);
e2fcad58
JD
1632 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
1633 if (unlikely(num_sg < 0))
1634 return num_sg;
1635 num_sg++;
e7428e95 1636 }
9dc7b9e4 1637 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
11a3a154
RR
1638}
1639
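/* ndo_start_xmit: free previously completed buffers, queue the skb, stop
 * the subqueue when the ring is nearly full, and kick the device unless
 * more packets are pending (xmit_more).
 */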
424efe9c 1640static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
99ffc696
RR
1641{
1642 struct virtnet_info *vi = netdev_priv(dev);
986a4f4d
JW
1643 int qnum = skb_get_queue_mapping(skb);
1644 struct send_queue *sq = &vi->sq[qnum];
9ed4cb07 1645 int err;
4b7fd2e6 1646 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
6b16f9ee 1647 bool kick = !netdev_xmit_more();
b92f1e67 1648 bool use_napi = sq->napi.weight;
2cb9c6ba 1649
2cb9c6ba 1650 /* Free up any pending old buffers before queueing new ones. */
df133f3f 1651 free_old_xmit_skbs(sq, false);
99ffc696 1652
bdb12e0d
WB
1653 if (use_napi && kick)
1654 virtqueue_enable_cb_delayed(sq->vq);
1655
074c3582
JK
1656 /* timestamp packet in software */
1657 skb_tx_timestamp(skb);
1658
03f191ba 1659 /* Try to transmit */
b7dfde95 1660 err = xmit_skb(sq, skb);
48925e37 1661
9ed4cb07 1662 /* This should not happen! */
681daee2 1663 if (unlikely(err)) {
9ed4cb07
RR
1664 dev->stats.tx_fifo_errors++;
1665 if (net_ratelimit())
1666 dev_warn(&dev->dev,
7934b481
YS
1667 "Unexpected TXQ (%d) queue failure: %d\n",
1668 qnum, err);
58eba97d 1669 dev->stats.tx_dropped++;
85e94525 1670 dev_kfree_skb_any(skb);
58eba97d 1671 return NETDEV_TX_OK;
296f96fc 1672 }
03f191ba 1673
48925e37 1674 /* Don't wait up for transmitted skbs to be freed. */
b92f1e67
WB
1675 if (!use_napi) {
1676 skb_orphan(skb);
895b5c9f 1677 nf_reset_ct(skb);
b92f1e67 1678 }
48925e37 1679
60302ff6
MT
 1680	/* If running out of space, stop the queue to avoid getting packets that we
 1681	 * are then unable to transmit.
 1682	 * An alternative would be to force the queuing layer to requeue the skb by
 1683	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
 1684	 * returned in a normal path of operation: it means that the driver is not
 1685	 * maintaining the TX queue stop/start state properly, and causes
 1686	 * the stack to do a non-trivial amount of useless work.
 1687	 * Since most packets only take 1 or 2 ring slots, stopping the queue
 1688	 * early means 16 slots are typically wasted.
d631b94e 1689 */
b7dfde95 1690 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
986a4f4d 1691 netif_stop_subqueue(dev, qnum);
b92f1e67
WB
1692 if (!use_napi &&
1693 unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
48925e37 1694 /* More just got used, free them then recheck. */
df133f3f 1695 free_old_xmit_skbs(sq, false);
b7dfde95 1696 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
986a4f4d 1697 netif_start_subqueue(dev, qnum);
e9d7417b 1698 virtqueue_disable_cb(sq->vq);
48925e37
RR
1699 }
1700 }
99ffc696 1701 }
48925e37 1702
461f03dc
TM
1703 if (kick || netif_xmit_stopped(txq)) {
1704 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
1705 u64_stats_update_begin(&sq->stats.syncp);
1706 sq->stats.kicks++;
1707 u64_stats_update_end(&sq->stats.syncp);
1708 }
1709 }
296f96fc 1710
0b725a2c 1711 return NETDEV_TX_OK;
c223a078
DM
1712}
1713
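
The comment above explains the TX queue stop/restart hysteresis: stop once fewer than 2 + MAX_SKB_FRAGS descriptors remain, reclaim completed buffers, and restart only if a worst-case skb fits again. The following is a minimal userspace sketch of that check, not driver code; the bare free-slot counter, the helper names and the MAX_SKB_FRAGS value of 17 are illustrative assumptions.

/*
 * Minimal userspace sketch of the stop/restart hysteresis used in
 * start_xmit() above.  Not driver code: the ring is modelled as a bare
 * free-slot counter and MAX_SKB_FRAGS is assumed to be 17, so a
 * worst-case skb needs 2 + 17 = 19 descriptors.
 */
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_MAX_SKB_FRAGS	17
#define SKETCH_STOP_THRESH	(2 + SKETCH_MAX_SKB_FRAGS)

struct sketch_txq {
	int num_free;	/* free descriptors in the ring */
	bool stopped;	/* mirrors netif_{stop,start}_subqueue() */
};

/* Called after queueing one skb that consumed @used descriptors. */
static void sketch_after_xmit(struct sketch_txq *q, int used, int completed)
{
	q->num_free -= used;

	if (q->num_free < SKETCH_STOP_THRESH) {
		q->stopped = true;
		/* Mimic free_old_xmit_skbs(): reclaim completed buffers
		 * and restart only if a worst-case skb now fits. */
		q->num_free += completed;
		if (q->num_free >= SKETCH_STOP_THRESH)
			q->stopped = false;
	}
}

int main(void)
{
	struct sketch_txq q = { .num_free = 21, .stopped = false };

	sketch_after_xmit(&q, 3, 0);	/* 18 free -> stop, nothing reclaimed */
	printf("free=%d stopped=%d\n", q.num_free, q.stopped);
	sketch_after_xmit(&q, 2, 5);	/* reclaim 5 -> 21 free, restart */
	printf("free=%d stopped=%d\n", q.num_free, q.stopped);
	return 0;
}
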
40cbfc37
AK
1714/*
1715 * Send command via the control virtqueue and check status. Commands
1716 * supported by the hypervisor, as indicated by feature bits, should
788a8b6d 1717 * never fail unless improperly formatted.
40cbfc37
AK
1718 */
1719static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
d24bae32 1720 struct scatterlist *out)
40cbfc37 1721{
f7bc9594 1722 struct scatterlist *sgs[4], hdr, stat;
d24bae32 1723 unsigned out_num = 0, tmp;
40cbfc37
AK
1724
1725 /* Caller should know better */
f7bc9594 1726 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
40cbfc37 1727
12e57169
MT
1728 vi->ctrl->status = ~0;
1729 vi->ctrl->hdr.class = class;
1730 vi->ctrl->hdr.cmd = cmd;
f7bc9594 1731 /* Add header */
12e57169 1732 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
f7bc9594 1733 sgs[out_num++] = &hdr;
40cbfc37 1734
f7bc9594
RR
1735 if (out)
1736 sgs[out_num++] = out;
40cbfc37 1737
f7bc9594 1738 /* Add return status. */
12e57169 1739 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
d24bae32 1740 sgs[out_num] = &stat;
40cbfc37 1741
d24bae32 1742 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
a7c58146 1743 virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
40cbfc37 1744
67975901 1745 if (unlikely(!virtqueue_kick(vi->cvq)))
12e57169 1746 return vi->ctrl->status == VIRTIO_NET_OK;
40cbfc37
AK
1747
 1748	/* Spin for a response; the kick causes an ioport write, trapping
1749 * into the hypervisor, so the request should be handled immediately.
1750 */
047b9b94
HG
1751 while (!virtqueue_get_buf(vi->cvq, &tmp) &&
1752 !virtqueue_is_broken(vi->cvq))
40cbfc37
AK
1753 cpu_relax();
1754
12e57169 1755 return vi->ctrl->status == VIRTIO_NET_OK;
40cbfc37
AK
1756}
1757
9c46f6d4
AW
1758static int virtnet_set_mac_address(struct net_device *dev, void *p)
1759{
1760 struct virtnet_info *vi = netdev_priv(dev);
1761 struct virtio_device *vdev = vi->vdev;
f2f2c8b4 1762 int ret;
e37e2ff3 1763 struct sockaddr *addr;
7e58d5ae 1764 struct scatterlist sg;
9c46f6d4 1765
ba5e4426
SS
1766 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
1767 return -EOPNOTSUPP;
1768
801822d1 1769 addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
e37e2ff3
AL
1770 if (!addr)
1771 return -ENOMEM;
e37e2ff3
AL
1772
1773 ret = eth_prepare_mac_addr_change(dev, addr);
f2f2c8b4 1774 if (ret)
e37e2ff3 1775 goto out;
9c46f6d4 1776
7e58d5ae
AK
1777 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
1778 sg_init_one(&sg, addr->sa_data, dev->addr_len);
1779 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
d24bae32 1780 VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
7e58d5ae
AK
1781 dev_warn(&vdev->dev,
1782 "Failed to set mac address by vq command.\n");
e37e2ff3
AL
1783 ret = -EINVAL;
1784 goto out;
7e58d5ae 1785 }
7e93a02f
MT
1786 } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
1787 !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
855e0c52
RR
1788 unsigned int i;
1789
1790 /* Naturally, this has an atomicity problem. */
1791 for (i = 0; i < dev->addr_len; i++)
1792 virtio_cwrite8(vdev,
1793 offsetof(struct virtio_net_config, mac) +
1794 i, addr->sa_data[i]);
7e58d5ae
AK
1795 }
1796
1797 eth_commit_mac_addr_change(dev, p);
e37e2ff3 1798 ret = 0;
9c46f6d4 1799
e37e2ff3
AL
1800out:
1801 kfree(addr);
1802 return ret;
9c46f6d4
AW
1803}
1804
bc1f4470 1805static void virtnet_stats(struct net_device *dev,
1806 struct rtnl_link_stats64 *tot)
3fa2a1df 1807{
1808 struct virtnet_info *vi = netdev_priv(dev);
3fa2a1df 1809 unsigned int start;
d7dfc5cf 1810 int i;
3fa2a1df 1811
d7dfc5cf 1812 for (i = 0; i < vi->max_queue_pairs; i++) {
2c4a2f7d 1813 u64 tpackets, tbytes, rpackets, rbytes, rdrops;
d7dfc5cf
TM
1814 struct receive_queue *rq = &vi->rq[i];
1815 struct send_queue *sq = &vi->sq[i];
3fa2a1df 1816
1817 do {
d7dfc5cf
TM
1818 start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
1819 tpackets = sq->stats.packets;
1820 tbytes = sq->stats.bytes;
1821 } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
83a27052
ED
1822
1823 do {
d7dfc5cf 1824 start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
d46eeeaf
JW
1825 rpackets = rq->stats.packets;
1826 rbytes = rq->stats.bytes;
1827 rdrops = rq->stats.drops;
d7dfc5cf 1828 } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
3fa2a1df 1829
1830 tot->rx_packets += rpackets;
1831 tot->tx_packets += tpackets;
1832 tot->rx_bytes += rbytes;
1833 tot->tx_bytes += tbytes;
2c4a2f7d 1834 tot->rx_dropped += rdrops;
3fa2a1df 1835 }
1836
1837 tot->tx_dropped = dev->stats.tx_dropped;
021ac8d3 1838 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
3fa2a1df 1839 tot->rx_length_errors = dev->stats.rx_length_errors;
1840 tot->rx_frame_errors = dev->stats.rx_frame_errors;
3fa2a1df 1841}
1842
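
virtnet_stats() above reads each queue's 64-bit counters inside a u64_stats_fetch_begin_irq()/retry loop so that a packets/bytes pair is never observed half-updated on 32-bit machines. Below is a toy single-threaded sketch of that sequence-counter protocol; the struct and helper names are made up for illustration and do not model the real SMP/IRQ semantics.

/*
 * Toy model of the u64_stats_fetch_begin/retry pattern used in
 * virtnet_stats() above: the writer bumps a sequence counter around the
 * update, the reader retries until it observes an even, unchanged value.
 * Single-threaded demo only.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_stats {
	unsigned int seq;	/* odd while an update is in flight */
	uint64_t packets;
	uint64_t bytes;
};

static void toy_update(struct toy_stats *s, uint64_t pkts, uint64_t len)
{
	s->seq++;		/* begin: seq becomes odd */
	s->packets += pkts;
	s->bytes += len;
	s->seq++;		/* end: seq becomes even again */
}

static void toy_read(const struct toy_stats *s, uint64_t *pkts, uint64_t *len)
{
	unsigned int start;

	do {
		start = s->seq;		/* u64_stats_fetch_begin_irq() */
		*pkts = s->packets;
		*len  = s->bytes;
	} while ((start & 1) || start != s->seq);	/* ..._fetch_retry_irq() */
}

int main(void)
{
	struct toy_stats s = { 0, 0, 0 };
	uint64_t p, b;

	toy_update(&s, 3, 4500);
	toy_read(&s, &p, &b);
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}
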
586d17c5
JW
1843static void virtnet_ack_link_announce(struct virtnet_info *vi)
1844{
1845 rtnl_lock();
1846 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
d24bae32 1847 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
586d17c5
JW
1848 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
1849 rtnl_unlock();
1850}
1851
47315329 1852static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
986a4f4d
JW
1853{
1854 struct scatterlist sg;
986a4f4d
JW
1855 struct net_device *dev = vi->dev;
1856
1857 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
1858 return 0;
1859
12e57169
MT
1860 vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
1861 sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
986a4f4d
JW
1862
1863 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
d24bae32 1864 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
986a4f4d
JW
1865 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
1866 queue_pairs);
1867 return -EINVAL;
55257d72 1868 } else {
986a4f4d 1869 vi->curr_queue_pairs = queue_pairs;
35ed159b
JW
1870 /* virtnet_open() will refill when device is going to up. */
1871 if (dev->flags & IFF_UP)
1872 schedule_delayed_work(&vi->refill, 0);
55257d72 1873 }
986a4f4d
JW
1874
1875 return 0;
1876}
1877
47315329
JF
1878static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1879{
1880 int err;
1881
1882 rtnl_lock();
1883 err = _virtnet_set_queues(vi, queue_pairs);
1884 rtnl_unlock();
1885 return err;
1886}
1887
296f96fc
RR
1888static int virtnet_close(struct net_device *dev)
1889{
1890 struct virtnet_info *vi = netdev_priv(dev);
986a4f4d 1891 int i;
296f96fc 1892
b2baed69
RR
1893 /* Make sure refill_work doesn't re-enable napi! */
1894 cancel_delayed_work_sync(&vi->refill);
986a4f4d 1895
b92f1e67 1896 for (i = 0; i < vi->max_queue_pairs; i++) {
754b8a21 1897 xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
986a4f4d 1898 napi_disable(&vi->rq[i].napi);
78a57b48 1899 virtnet_napi_tx_disable(&vi->sq[i].napi);
b92f1e67 1900 }
296f96fc 1901
296f96fc
RR
1902 return 0;
1903}
1904
2af7698e
AW
1905static void virtnet_set_rx_mode(struct net_device *dev)
1906{
1907 struct virtnet_info *vi = netdev_priv(dev);
f565a7c2 1908 struct scatterlist sg[2];
f565a7c2 1909 struct virtio_net_ctrl_mac *mac_data;
ccffad25 1910 struct netdev_hw_addr *ha;
32e7bfc4 1911 int uc_count;
4cd24eaf 1912 int mc_count;
f565a7c2
AW
1913 void *buf;
1914 int i;
2af7698e 1915
788a8b6d 1916 /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
2af7698e
AW
1917 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
1918 return;
1919
12e57169
MT
1920 vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
1921 vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
2af7698e 1922
12e57169 1923 sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
2af7698e
AW
1924
1925 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
d24bae32 1926 VIRTIO_NET_CTRL_RX_PROMISC, sg))
2af7698e 1927 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
12e57169 1928 vi->ctrl->promisc ? "en" : "dis");
2af7698e 1929
12e57169 1930 sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
2af7698e
AW
1931
1932 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
d24bae32 1933 VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
2af7698e 1934 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
12e57169 1935 vi->ctrl->allmulti ? "en" : "dis");
f565a7c2 1936
32e7bfc4 1937 uc_count = netdev_uc_count(dev);
4cd24eaf 1938 mc_count = netdev_mc_count(dev);
f565a7c2 1939 /* MAC filter - use one buffer for both lists */
4cd24eaf
JP
1940 buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
1941 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
1942 mac_data = buf;
e68ed8f0 1943 if (!buf)
f565a7c2 1944 return;
f565a7c2 1945
23e258e1
AW
1946 sg_init_table(sg, 2);
1947
f565a7c2 1948 /* Store the unicast list and count in the front of the buffer */
fdd819b2 1949 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
ccffad25 1950 i = 0;
32e7bfc4 1951 netdev_for_each_uc_addr(ha, dev)
ccffad25 1952 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
f565a7c2
AW
1953
1954 sg_set_buf(&sg[0], mac_data,
32e7bfc4 1955 sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
f565a7c2
AW
1956
1957 /* multicast list and count fill the end */
32e7bfc4 1958 mac_data = (void *)&mac_data->macs[uc_count][0];
f565a7c2 1959
fdd819b2 1960 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
567ec874 1961 i = 0;
22bedad3
JP
1962 netdev_for_each_mc_addr(ha, dev)
1963 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
f565a7c2
AW
1964
1965 sg_set_buf(&sg[1], mac_data,
4cd24eaf 1966 sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
f565a7c2
AW
1967
1968 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
d24bae32 1969 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
99e872ae 1970 dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
f565a7c2
AW
1971
1972 kfree(buf);
2af7698e
AW
1973}
1974
80d5c368
PM
1975static int virtnet_vlan_rx_add_vid(struct net_device *dev,
1976 __be16 proto, u16 vid)
0bde9569
AW
1977{
1978 struct virtnet_info *vi = netdev_priv(dev);
1979 struct scatterlist sg;
1980
d7fad4c8 1981 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
12e57169 1982 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
0bde9569
AW
1983
1984 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
d24bae32 1985 VIRTIO_NET_CTRL_VLAN_ADD, &sg))
0bde9569 1986 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
8e586137 1987 return 0;
0bde9569
AW
1988}
1989
80d5c368
PM
1990static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
1991 __be16 proto, u16 vid)
0bde9569
AW
1992{
1993 struct virtnet_info *vi = netdev_priv(dev);
1994 struct scatterlist sg;
1995
d7fad4c8 1996 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
12e57169 1997 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
0bde9569
AW
1998
1999 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
d24bae32 2000 VIRTIO_NET_CTRL_VLAN_DEL, &sg))
0bde9569 2001 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
8e586137 2002 return 0;
0bde9569
AW
2003}
2004
310974fa 2005static void virtnet_clean_affinity(struct virtnet_info *vi)
986a4f4d
JW
2006{
2007 int i;
2008
8898c21c
WG
2009 if (vi->affinity_hint_set) {
2010 for (i = 0; i < vi->max_queue_pairs; i++) {
19e226e8
CR
2011 virtqueue_set_affinity(vi->rq[i].vq, NULL);
2012 virtqueue_set_affinity(vi->sq[i].vq, NULL);
47be2479
WG
2013 }
2014
8898c21c
WG
2015 vi->affinity_hint_set = false;
2016 }
8898c21c 2017}
47be2479 2018
8898c21c
WG
2019static void virtnet_set_affinity(struct virtnet_info *vi)
2020{
2ca653d6
CR
2021 cpumask_var_t mask;
2022 int stragglers;
2023 int group_size;
2024 int i, j, cpu;
2025 int num_cpu;
2026 int stride;
2027
2028 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
310974fa 2029 virtnet_clean_affinity(vi);
8898c21c 2030 return;
986a4f4d
JW
2031 }
2032
2ca653d6
CR
2033 num_cpu = num_online_cpus();
2034 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
2035 stragglers = num_cpu >= vi->curr_queue_pairs ?
2036 num_cpu % vi->curr_queue_pairs :
2037 0;
2038 cpu = cpumask_next(-1, cpu_online_mask);
4d99f660 2039
2ca653d6
CR
2040 for (i = 0; i < vi->curr_queue_pairs; i++) {
2041 group_size = stride + (i < stragglers ? 1 : 0);
2042
2043 for (j = 0; j < group_size; j++) {
2044 cpumask_set_cpu(cpu, mask);
2045 cpu = cpumask_next_wrap(cpu, cpu_online_mask,
2046 nr_cpu_ids, false);
2047 }
2048 virtqueue_set_affinity(vi->rq[i].vq, mask);
2049 virtqueue_set_affinity(vi->sq[i].vq, mask);
044ab86d 2050 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
2ca653d6 2051 cpumask_clear(mask);
986a4f4d
JW
2052 }
2053
8898c21c 2054 vi->affinity_hint_set = true;
2ca653d6 2055 free_cpumask_var(mask);
986a4f4d
JW
2056}
2057
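
virtnet_set_affinity() above spreads the queue pairs over the online CPUs: every queue gets a base group of num_cpu / curr_queue_pairs CPUs, and the first num_cpu % curr_queue_pairs queues absorb one extra "straggler" CPU each. A small arithmetic sketch follows; the CPU and queue counts are example values, not anything read from a device.

/*
 * Arithmetic sketch of the CPU distribution done in virtnet_set_affinity().
 * The example numbers (6 online CPUs, 4 queue pairs) are illustrative only.
 */
#include <stdio.h>

int main(void)
{
	int num_cpu = 6;		/* assumed online CPUs */
	int queue_pairs = 4;		/* assumed curr_queue_pairs */
	int stride = num_cpu / queue_pairs;
	int stragglers = num_cpu >= queue_pairs ? num_cpu % queue_pairs : 0;
	int cpu = 0;

	if (stride < 1)			/* driver uses max_t(int, ..., 1) */
		stride = 1;

	for (int i = 0; i < queue_pairs; i++) {
		int group_size = stride + (i < stragglers ? 1 : 0);

		printf("queue %d gets CPUs", i);
		for (int j = 0; j < group_size; j++)
			printf(" %d", (cpu++) % num_cpu);
		printf("\n");
	}
	/* Output: queue 0 -> CPUs 0 1, queue 1 -> 2 3, queue 2 -> 4, queue 3 -> 5 */
	return 0;
}
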
8017c279 2058static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
8de4b2f3 2059{
8017c279
SAS
2060 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2061 node);
2062 virtnet_set_affinity(vi);
2063 return 0;
2064}
8de4b2f3 2065
8017c279
SAS
2066static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
2067{
2068 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2069 node_dead);
2070 virtnet_set_affinity(vi);
2071 return 0;
2072}
3ab098df 2073
8017c279
SAS
2074static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
2075{
2076 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2077 node);
2078
310974fa 2079 virtnet_clean_affinity(vi);
8017c279
SAS
2080 return 0;
2081}
2082
2083static enum cpuhp_state virtionet_online;
2084
2085static int virtnet_cpu_notif_add(struct virtnet_info *vi)
2086{
2087 int ret;
2088
2089 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
2090 if (ret)
2091 return ret;
2092 ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2093 &vi->node_dead);
2094 if (!ret)
2095 return ret;
2096 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2097 return ret;
2098}
2099
2100static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
2101{
2102 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2103 cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2104 &vi->node_dead);
986a4f4d
JW
2105}
2106
8f9f4668
RJ
2107static void virtnet_get_ringparam(struct net_device *dev,
2108 struct ethtool_ringparam *ring)
2109{
2110 struct virtnet_info *vi = netdev_priv(dev);
2111
986a4f4d
JW
2112 ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2113 ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
8f9f4668
RJ
2114 ring->rx_pending = ring->rx_max_pending;
2115 ring->tx_pending = ring->tx_max_pending;
8f9f4668
RJ
2116}
2117
66846048
RJ
2118
2119static void virtnet_get_drvinfo(struct net_device *dev,
2120 struct ethtool_drvinfo *info)
2121{
2122 struct virtnet_info *vi = netdev_priv(dev);
2123 struct virtio_device *vdev = vi->vdev;
2124
2125 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
2126 strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
2127 strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
2128
2129}
2130
d73bcd2c
JW
2131/* TODO: Eliminate OOO packets during switching */
2132static int virtnet_set_channels(struct net_device *dev,
2133 struct ethtool_channels *channels)
2134{
2135 struct virtnet_info *vi = netdev_priv(dev);
2136 u16 queue_pairs = channels->combined_count;
2137 int err;
2138
2139 /* We don't support separate rx/tx channels.
2140 * We don't allow setting 'other' channels.
2141 */
2142 if (channels->rx_count || channels->tx_count || channels->other_count)
2143 return -EINVAL;
2144
c18e9cd6 2145 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
d73bcd2c
JW
2146 return -EINVAL;
2147
f600b690
JF
 2148	/* For now we don't support modifying channels while XDP is loaded.
 2149	 * Also, when XDP is loaded all RX queues have XDP programs, so we only
 2150	 * need to check a single RX queue.
2151 */
2152 if (vi->rq[0].xdp_prog)
2153 return -EINVAL;
2154
47be2479 2155 get_online_cpus();
47315329 2156 err = _virtnet_set_queues(vi, queue_pairs);
de33212f
JD
2157 if (err) {
2158 put_online_cpus();
2159 goto err;
d73bcd2c 2160 }
de33212f 2161 virtnet_set_affinity(vi);
47be2479 2162 put_online_cpus();
d73bcd2c 2163
de33212f
JD
2164 netif_set_real_num_tx_queues(dev, queue_pairs);
2165 netif_set_real_num_rx_queues(dev, queue_pairs);
2166 err:
d73bcd2c
JW
2167 return err;
2168}
2169
d7dfc5cf
TM
2170static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2171{
2172 struct virtnet_info *vi = netdev_priv(dev);
d7dfc5cf 2173 unsigned int i, j;
d7a9a01b 2174 u8 *p = data;
d7dfc5cf
TM
2175
2176 switch (stringset) {
2177 case ETH_SS_STATS:
2178 for (i = 0; i < vi->curr_queue_pairs; i++) {
d7a9a01b
AD
2179 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
2180 ethtool_sprintf(&p, "rx_queue_%u_%s", i,
2181 virtnet_rq_stats_desc[j].desc);
d7dfc5cf
TM
2182 }
2183
2184 for (i = 0; i < vi->curr_queue_pairs; i++) {
d7a9a01b
AD
2185 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
2186 ethtool_sprintf(&p, "tx_queue_%u_%s", i,
2187 virtnet_sq_stats_desc[j].desc);
d7dfc5cf
TM
2188 }
2189 break;
2190 }
2191}
2192
2193static int virtnet_get_sset_count(struct net_device *dev, int sset)
2194{
2195 struct virtnet_info *vi = netdev_priv(dev);
2196
2197 switch (sset) {
2198 case ETH_SS_STATS:
2199 return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
2200 VIRTNET_SQ_STATS_LEN);
2201 default:
2202 return -EOPNOTSUPP;
2203 }
2204}
2205
2206static void virtnet_get_ethtool_stats(struct net_device *dev,
2207 struct ethtool_stats *stats, u64 *data)
2208{
2209 struct virtnet_info *vi = netdev_priv(dev);
2210 unsigned int idx = 0, start, i, j;
2211 const u8 *stats_base;
2212 size_t offset;
2213
2214 for (i = 0; i < vi->curr_queue_pairs; i++) {
2215 struct receive_queue *rq = &vi->rq[i];
2216
d46eeeaf 2217 stats_base = (u8 *)&rq->stats;
d7dfc5cf
TM
2218 do {
2219 start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
2220 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
2221 offset = virtnet_rq_stats_desc[j].offset;
2222 data[idx + j] = *(u64 *)(stats_base + offset);
2223 }
2224 } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
2225 idx += VIRTNET_RQ_STATS_LEN;
2226 }
2227
2228 for (i = 0; i < vi->curr_queue_pairs; i++) {
2229 struct send_queue *sq = &vi->sq[i];
2230
2231 stats_base = (u8 *)&sq->stats;
2232 do {
2233 start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
2234 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
2235 offset = virtnet_sq_stats_desc[j].offset;
2236 data[idx + j] = *(u64 *)(stats_base + offset);
2237 }
2238 } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
2239 idx += VIRTNET_SQ_STATS_LEN;
2240 }
2241}
2242
d73bcd2c
JW
2243static void virtnet_get_channels(struct net_device *dev,
2244 struct ethtool_channels *channels)
2245{
2246 struct virtnet_info *vi = netdev_priv(dev);
2247
2248 channels->combined_count = vi->curr_queue_pairs;
2249 channels->max_combined = vi->max_queue_pairs;
2250 channels->max_other = 0;
2251 channels->rx_count = 0;
2252 channels->tx_count = 0;
2253 channels->other_count = 0;
2254}
2255
ebb6b4b1
PR
2256static int virtnet_set_link_ksettings(struct net_device *dev,
2257 const struct ethtool_link_ksettings *cmd)
16032be5
NA
2258{
2259 struct virtnet_info *vi = netdev_priv(dev);
16032be5 2260
9aedc6e2
CF
2261 return ethtool_virtdev_set_link_ksettings(dev, cmd,
2262 &vi->speed, &vi->duplex);
16032be5
NA
2263}
2264
ebb6b4b1
PR
2265static int virtnet_get_link_ksettings(struct net_device *dev,
2266 struct ethtool_link_ksettings *cmd)
16032be5
NA
2267{
2268 struct virtnet_info *vi = netdev_priv(dev);
2269
ebb6b4b1
PR
2270 cmd->base.speed = vi->speed;
2271 cmd->base.duplex = vi->duplex;
2272 cmd->base.port = PORT_OTHER;
16032be5
NA
2273
2274 return 0;
2275}
2276
0c465be1
JW
2277static int virtnet_set_coalesce(struct net_device *dev,
2278 struct ethtool_coalesce *ec)
2279{
0c465be1
JW
2280 struct virtnet_info *vi = netdev_priv(dev);
2281 int i, napi_weight;
2282
a51e5206
JK
2283 if (ec->tx_max_coalesced_frames > 1 ||
2284 ec->rx_max_coalesced_frames != 1)
0c465be1
JW
2285 return -EINVAL;
2286
0c465be1 2287 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
0c465be1
JW
2288 if (napi_weight ^ vi->sq[0].napi.weight) {
2289 if (dev->flags & IFF_UP)
2290 return -EBUSY;
2291 for (i = 0; i < vi->max_queue_pairs; i++)
2292 vi->sq[i].napi.weight = napi_weight;
2293 }
2294
2295 return 0;
2296}
2297
2298static int virtnet_get_coalesce(struct net_device *dev,
2299 struct ethtool_coalesce *ec)
2300{
2301 struct ethtool_coalesce ec_default = {
2302 .cmd = ETHTOOL_GCOALESCE,
2303 .rx_max_coalesced_frames = 1,
2304 };
2305 struct virtnet_info *vi = netdev_priv(dev);
2306
2307 memcpy(ec, &ec_default, sizeof(ec_default));
2308
2309 if (vi->sq[0].napi.weight)
2310 ec->tx_max_coalesced_frames = 1;
2311
2312 return 0;
2313}
2314
16032be5
NA
2315static void virtnet_init_settings(struct net_device *dev)
2316{
2317 struct virtnet_info *vi = netdev_priv(dev);
2318
2319 vi->speed = SPEED_UNKNOWN;
2320 vi->duplex = DUPLEX_UNKNOWN;
2321}
2322
faa9b39f
JB
2323static void virtnet_update_settings(struct virtnet_info *vi)
2324{
2325 u32 speed;
2326 u8 duplex;
2327
2328 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
2329 return;
2330
64ffa39d
MT
2331 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
2332
faa9b39f
JB
2333 if (ethtool_validate_speed(speed))
2334 vi->speed = speed;
64ffa39d
MT
2335
2336 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
2337
faa9b39f
JB
2338 if (ethtool_validate_duplex(duplex))
2339 vi->duplex = duplex;
2340}
2341
0fc0b732 2342static const struct ethtool_ops virtnet_ethtool_ops = {
a51e5206 2343 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
66846048 2344 .get_drvinfo = virtnet_get_drvinfo,
9f4d26d0 2345 .get_link = ethtool_op_get_link,
8f9f4668 2346 .get_ringparam = virtnet_get_ringparam,
d7dfc5cf
TM
2347 .get_strings = virtnet_get_strings,
2348 .get_sset_count = virtnet_get_sset_count,
2349 .get_ethtool_stats = virtnet_get_ethtool_stats,
d73bcd2c
JW
2350 .set_channels = virtnet_set_channels,
2351 .get_channels = virtnet_get_channels,
074c3582 2352 .get_ts_info = ethtool_op_get_ts_info,
ebb6b4b1
PR
2353 .get_link_ksettings = virtnet_get_link_ksettings,
2354 .set_link_ksettings = virtnet_set_link_ksettings,
0c465be1
JW
2355 .set_coalesce = virtnet_set_coalesce,
2356 .get_coalesce = virtnet_get_coalesce,
a9ea3fc6
HX
2357};
2358
9fe7bfce
JF
2359static void virtnet_freeze_down(struct virtio_device *vdev)
2360{
2361 struct virtnet_info *vi = vdev->priv;
2362 int i;
2363
2364 /* Make sure no work handler is accessing the device */
2365 flush_work(&vi->config_work);
2366
05c998b7 2367 netif_tx_lock_bh(vi->dev);
9fe7bfce 2368 netif_device_detach(vi->dev);
05c998b7 2369 netif_tx_unlock_bh(vi->dev);
9fe7bfce
JF
2370 cancel_delayed_work_sync(&vi->refill);
2371
2372 if (netif_running(vi->dev)) {
b92f1e67 2373 for (i = 0; i < vi->max_queue_pairs; i++) {
9fe7bfce 2374 napi_disable(&vi->rq[i].napi);
78a57b48 2375 virtnet_napi_tx_disable(&vi->sq[i].napi);
b92f1e67 2376 }
9fe7bfce
JF
2377 }
2378}
2379
2380static int init_vqs(struct virtnet_info *vi);
2381
2382static int virtnet_restore_up(struct virtio_device *vdev)
2383{
2384 struct virtnet_info *vi = vdev->priv;
2385 int err, i;
2386
2387 err = init_vqs(vi);
2388 if (err)
2389 return err;
2390
2391 virtio_device_ready(vdev);
2392
2393 if (netif_running(vi->dev)) {
2394 for (i = 0; i < vi->curr_queue_pairs; i++)
2395 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2396 schedule_delayed_work(&vi->refill, 0);
2397
b92f1e67 2398 for (i = 0; i < vi->max_queue_pairs; i++) {
e4e8452a 2399 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
b92f1e67
WB
2400 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2401 &vi->sq[i].napi);
2402 }
9fe7bfce
JF
2403 }
2404
05c998b7 2405 netif_tx_lock_bh(vi->dev);
9fe7bfce 2406 netif_device_attach(vi->dev);
05c998b7 2407 netif_tx_unlock_bh(vi->dev);
9fe7bfce
JF
2408 return err;
2409}
2410
3f93522f
JW
2411static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
2412{
2413 struct scatterlist sg;
12e57169 2414 vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
3f93522f 2415
12e57169 2416 sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
3f93522f
JW
2417
2418 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
2419 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
7934b481 2420 dev_warn(&vi->dev->dev, "Fail to set guest offload.\n");
3f93522f
JW
2421 return -EINVAL;
2422 }
2423
2424 return 0;
2425}
2426
2427static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
2428{
2429 u64 offloads = 0;
2430
2431 if (!vi->guest_offloads)
2432 return 0;
2433
3f93522f
JW
2434 return virtnet_set_guest_offloads(vi, offloads);
2435}
2436
2437static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
2438{
2439 u64 offloads = vi->guest_offloads;
2440
2441 if (!vi->guest_offloads)
2442 return 0;
3f93522f
JW
2443
2444 return virtnet_set_guest_offloads(vi, offloads);
2445}
2446
9861ce03
JK
2447static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2448 struct netlink_ext_ack *extack)
f600b690
JF
2449{
2450 unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
2451 struct virtnet_info *vi = netdev_priv(dev);
2452 struct bpf_prog *old_prog;
017b29c3 2453 u16 xdp_qp = 0, curr_qp;
672aafd5 2454 int i, err;
f600b690 2455
3f93522f
JW
2456 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
2457 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
2458 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
2459 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
18ba58e1
JW
2460 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
2461 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
2462 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first");
f600b690
JF
2463 return -EOPNOTSUPP;
2464 }
2465
2466 if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
4d463c4d 2467 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
f600b690
JF
2468 return -EINVAL;
2469 }
2470
2471 if (dev->mtu > max_sz) {
4d463c4d 2472 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
f600b690
JF
2473 netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
2474 return -EINVAL;
2475 }
2476
672aafd5
JF
2477 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
2478 if (prog)
2479 xdp_qp = nr_cpu_ids;
2480
2481 /* XDP requires extra queues for XDP_TX */
2482 if (curr_qp + xdp_qp > vi->max_queue_pairs) {
97c2c69e 2483 netdev_warn(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
672aafd5 2484 curr_qp + xdp_qp, vi->max_queue_pairs);
97c2c69e 2485 xdp_qp = 0;
672aafd5
JF
2486 }
2487
03aa6d34
TM
2488 old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
2489 if (!prog && !old_prog)
2490 return 0;
2491
85192dbf
AN
2492 if (prog)
2493 bpf_prog_add(prog, vi->max_queue_pairs - 1);
2de2f7f4 2494
4941d472 2495 /* Make sure NAPI is not using any XDP TX queues for RX. */
534da5e8
TM
2496 if (netif_running(dev)) {
2497 for (i = 0; i < vi->max_queue_pairs; i++) {
4e09ff53 2498 napi_disable(&vi->rq[i].napi);
534da5e8
TM
2499 virtnet_napi_tx_disable(&vi->sq[i].napi);
2500 }
2501 }
f600b690 2502
03aa6d34
TM
2503 if (!prog) {
2504 for (i = 0; i < vi->max_queue_pairs; i++) {
2505 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
2506 if (i == 0)
2507 virtnet_restore_guest_offloads(vi);
2508 }
2509 synchronize_net();
2510 }
f600b690 2511
4941d472
JW
2512 err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
2513 if (err)
2514 goto err;
188313c1 2515 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
4941d472 2516 vi->xdp_queue_pairs = xdp_qp;
672aafd5 2517
03aa6d34 2518 if (prog) {
97c2c69e 2519 vi->xdp_enabled = true;
03aa6d34
TM
2520 for (i = 0; i < vi->max_queue_pairs; i++) {
2521 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
2522 if (i == 0 && !old_prog)
3f93522f 2523 virtnet_clear_guest_offloads(vi);
3f93522f 2524 }
97c2c69e
XZ
2525 } else {
2526 vi->xdp_enabled = false;
03aa6d34
TM
2527 }
2528
2529 for (i = 0; i < vi->max_queue_pairs; i++) {
f600b690
JF
2530 if (old_prog)
2531 bpf_prog_put(old_prog);
534da5e8 2532 if (netif_running(dev)) {
4e09ff53 2533 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
534da5e8
TM
2534 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2535 &vi->sq[i].napi);
2536 }
f600b690
JF
2537 }
2538
2539 return 0;
2de2f7f4 2540
4941d472 2541err:
03aa6d34
TM
2542 if (!prog) {
2543 virtnet_clear_guest_offloads(vi);
2544 for (i = 0; i < vi->max_queue_pairs; i++)
2545 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
2546 }
2547
8be4d9a4 2548 if (netif_running(dev)) {
534da5e8 2549 for (i = 0; i < vi->max_queue_pairs; i++) {
8be4d9a4 2550 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
534da5e8
TM
2551 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2552 &vi->sq[i].napi);
2553 }
8be4d9a4 2554 }
2de2f7f4
JF
2555 if (prog)
2556 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
2557 return err;
f600b690
JF
2558}
2559
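
virtnet_xdp_set() above tries to reserve one extra TX queue per possible CPU for XDP_TX; when curr_qp + nr_cpu_ids would exceed max_queue_pairs it falls back to xdp_qp = 0 and shares the regular TX queues in the slower locked mode mentioned in the warning. A sketch of that budget check with assumed numbers:

/*
 * Sketch of the queue budget check in virtnet_xdp_set().  The numbers
 * (4 current pairs, 8 CPUs, 8 max pairs) are illustrative assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int curr_qp = 4;	/* queue pairs already in use */
	unsigned int nr_cpus = 8;	/* one XDP_TX queue wanted per CPU */
	unsigned int max_qp = 8;	/* device limit */
	unsigned int xdp_qp = nr_cpus;
	bool locked_tx_mode = false;

	if (curr_qp + xdp_qp > max_qp) {
		/* Not enough queues: share the normal TX queues instead,
		 * which the driver serializes with a per-queue lock. */
		xdp_qp = 0;
		locked_tx_mode = true;
	}

	printf("xdp_qp=%u locked_tx_mode=%d total=%u\n",
	       xdp_qp, locked_tx_mode, curr_qp + xdp_qp);
	return 0;
}
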
f4e63525 2560static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
f600b690
JF
2561{
2562 switch (xdp->command) {
2563 case XDP_SETUP_PROG:
9861ce03 2564 return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
f600b690
JF
2565 default:
2566 return -EINVAL;
2567 }
2568}
2569
ba5e4426
SS
2570static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
2571 size_t len)
2572{
2573 struct virtnet_info *vi = netdev_priv(dev);
2574 int ret;
2575
2576 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
2577 return -EOPNOTSUPP;
2578
2579 ret = snprintf(buf, len, "sby");
2580 if (ret >= len)
2581 return -EOPNOTSUPP;
2582
2583 return 0;
2584}
2585
a02e8964
WB
2586static int virtnet_set_features(struct net_device *dev,
2587 netdev_features_t features)
2588{
2589 struct virtnet_info *vi = netdev_priv(dev);
cf8691cb 2590 u64 offloads;
a02e8964
WB
2591 int err;
2592
3618ad2a 2593 if ((dev->features ^ features) & NETIF_F_LRO) {
97c2c69e 2594 if (vi->xdp_enabled)
cf8691cb
MT
2595 return -EBUSY;
2596
a02e8964 2597 if (features & NETIF_F_LRO)
cf8691cb 2598 offloads = vi->guest_offloads_capable;
a02e8964 2599 else
cf8691cb
MT
2600 offloads = vi->guest_offloads_capable &
2601 ~GUEST_OFFLOAD_LRO_MASK;
a02e8964 2602
cf8691cb
MT
2603 err = virtnet_set_guest_offloads(vi, offloads);
2604 if (err)
2605 return err;
2606 vi->guest_offloads = offloads;
a02e8964
WB
2607 }
2608
2609 return 0;
2610}
2611
76288b4e
SH
2612static const struct net_device_ops virtnet_netdev = {
2613 .ndo_open = virtnet_open,
2614 .ndo_stop = virtnet_close,
2615 .ndo_start_xmit = start_xmit,
2616 .ndo_validate_addr = eth_validate_addr,
9c46f6d4 2617 .ndo_set_mac_address = virtnet_set_mac_address,
2af7698e 2618 .ndo_set_rx_mode = virtnet_set_rx_mode,
3fa2a1df 2619 .ndo_get_stats64 = virtnet_stats,
1824a989
AW
2620 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
2621 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
f4e63525 2622 .ndo_bpf = virtnet_xdp,
186b3c99 2623 .ndo_xdp_xmit = virtnet_xdp_xmit,
2836b4f2 2624 .ndo_features_check = passthru_features_check,
ba5e4426 2625 .ndo_get_phys_port_name = virtnet_get_phys_port_name,
a02e8964 2626 .ndo_set_features = virtnet_set_features,
76288b4e
SH
2627};
2628
586d17c5 2629static void virtnet_config_changed_work(struct work_struct *work)
9f4d26d0 2630{
586d17c5
JW
2631 struct virtnet_info *vi =
2632 container_of(work, struct virtnet_info, config_work);
9f4d26d0
MM
2633 u16 v;
2634
855e0c52
RR
2635 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
2636 struct virtio_net_config, status, &v) < 0)
507613bf 2637 return;
586d17c5
JW
2638
2639 if (v & VIRTIO_NET_S_ANNOUNCE) {
ee89bab1 2640 netdev_notify_peers(vi->dev);
586d17c5
JW
2641 virtnet_ack_link_announce(vi);
2642 }
9f4d26d0
MM
2643
2644 /* Ignore unknown (future) status bits */
2645 v &= VIRTIO_NET_S_LINK_UP;
2646
2647 if (vi->status == v)
507613bf 2648 return;
9f4d26d0
MM
2649
2650 vi->status = v;
2651
2652 if (vi->status & VIRTIO_NET_S_LINK_UP) {
faa9b39f 2653 virtnet_update_settings(vi);
9f4d26d0 2654 netif_carrier_on(vi->dev);
986a4f4d 2655 netif_tx_wake_all_queues(vi->dev);
9f4d26d0
MM
2656 } else {
2657 netif_carrier_off(vi->dev);
986a4f4d 2658 netif_tx_stop_all_queues(vi->dev);
9f4d26d0
MM
2659 }
2660}
2661
2662static void virtnet_config_changed(struct virtio_device *vdev)
2663{
2664 struct virtnet_info *vi = vdev->priv;
2665
3b07e9ca 2666 schedule_work(&vi->config_work);
9f4d26d0
MM
2667}
2668
986a4f4d
JW
2669static void virtnet_free_queues(struct virtnet_info *vi)
2670{
d4fb84ee
AV
2671 int i;
2672
ab3971b1 2673 for (i = 0; i < vi->max_queue_pairs; i++) {
5198d545
JK
2674 __netif_napi_del(&vi->rq[i].napi);
2675 __netif_napi_del(&vi->sq[i].napi);
ab3971b1 2676 }
d4fb84ee 2677
5198d545 2678 /* We called __netif_napi_del(),
963abe5c
ED
2679 * we need to respect an RCU grace period before freeing vi->rq
2680 */
2681 synchronize_net();
2682
986a4f4d
JW
2683 kfree(vi->rq);
2684 kfree(vi->sq);
12e57169 2685 kfree(vi->ctrl);
986a4f4d
JW
2686}
2687
47315329 2688static void _free_receive_bufs(struct virtnet_info *vi)
986a4f4d 2689{
f600b690 2690 struct bpf_prog *old_prog;
986a4f4d
JW
2691 int i;
2692
2693 for (i = 0; i < vi->max_queue_pairs; i++) {
2694 while (vi->rq[i].pages)
2695 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
f600b690
JF
2696
2697 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
2698 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
2699 if (old_prog)
2700 bpf_prog_put(old_prog);
986a4f4d 2701 }
47315329
JF
2702}
2703
2704static void free_receive_bufs(struct virtnet_info *vi)
2705{
2706 rtnl_lock();
2707 _free_receive_bufs(vi);
f600b690 2708 rtnl_unlock();
986a4f4d
JW
2709}
2710
fb51879d
MD
2711static void free_receive_page_frags(struct virtnet_info *vi)
2712{
2713 int i;
2714 for (i = 0; i < vi->max_queue_pairs; i++)
2715 if (vi->rq[i].alloc_frag.page)
2716 put_page(vi->rq[i].alloc_frag.page);
2717}
2718
986a4f4d
JW
2719static void free_unused_bufs(struct virtnet_info *vi)
2720{
2721 void *buf;
2722 int i;
2723
2724 for (i = 0; i < vi->max_queue_pairs; i++) {
2725 struct virtqueue *vq = vi->sq[i].vq;
56434a01 2726 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
5050471d 2727 if (!is_xdp_frame(buf))
56434a01
JF
2728 dev_kfree_skb(buf);
2729 else
5050471d 2730 xdp_return_frame(ptr_to_xdp(buf));
56434a01 2731 }
986a4f4d
JW
2732 }
2733
2734 for (i = 0; i < vi->max_queue_pairs; i++) {
2735 struct virtqueue *vq = vi->rq[i].vq;
2736
2737 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
ab7db917 2738 if (vi->mergeable_rx_bufs) {
680557cf 2739 put_page(virt_to_head_page(buf));
ab7db917 2740 } else if (vi->big_packets) {
fa9fac17 2741 give_pages(&vi->rq[i], buf);
ab7db917 2742 } else {
f6b10209 2743 put_page(virt_to_head_page(buf));
ab7db917 2744 }
986a4f4d 2745 }
986a4f4d
JW
2746 }
2747}
2748
e9d7417b
JW
2749static void virtnet_del_vqs(struct virtnet_info *vi)
2750{
2751 struct virtio_device *vdev = vi->vdev;
2752
310974fa 2753 virtnet_clean_affinity(vi);
986a4f4d 2754
e9d7417b 2755 vdev->config->del_vqs(vdev);
986a4f4d
JW
2756
2757 virtnet_free_queues(vi);
e9d7417b
JW
2758}
2759
d85b758f
MT
2760/* How large should a single buffer be so a queue full of these can fit at
2761 * least one full packet?
2762 * Logic below assumes the mergeable buffer header is used.
2763 */
2764static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
2765{
2766 const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2767 unsigned int rq_size = virtqueue_get_vring_size(vq);
2768 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
2769 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
2770 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
2771
f0c3192c
MT
2772 return max(max(min_buf_len, hdr_len) - hdr_len,
2773 (unsigned int)GOOD_PACKET_LEN);
d85b758f
MT
2774}
2775
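
A worked example of the sizing above may help: assuming the 12-byte mergeable header, the 1518-byte GOOD_PACKET_LEN floor, a 1500-byte MTU and a 256-entry ring, the per-buffer share of one packet is tiny and the floor wins; with a 16-entry ring and 64 KiB GSO packets each buffer must be about 4 KiB. The sketch below redoes the arithmetic with those assumed constants.

/*
 * Worked example of mergeable_min_buf_len() above, with assumed values:
 * 12-byte mergeable header, 1518-byte GOOD_PACKET_LEN floor, and two
 * example (ring size, max packet) combinations.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int min_buf_len(unsigned int ring, unsigned int pkt)
{
	const unsigned int hdr_len = 12;		/* virtio_net_hdr_mrg_rxbuf */
	const unsigned int good_packet_len = 1518;	/* ETH_HLEN + VLAN_HLEN + 1500 */
	unsigned int buf_len = hdr_len + 14 + 4 + pkt;	/* + ETH_HLEN + VLAN_HLEN */
	unsigned int per_buf = DIV_ROUND_UP(buf_len, ring);
	unsigned int res = (per_buf > hdr_len ? per_buf : hdr_len) - hdr_len;

	return res > good_packet_len ? res : good_packet_len;
}

int main(void)
{
	/* 256-entry ring, 1500-byte MTU: the 1518-byte floor dominates. */
	printf("%u\n", min_buf_len(256, 1500));		/* -> 1518 */
	/* 16-entry ring, 64 KiB GSO packets: each buffer must be bigger. */
	printf("%u\n", min_buf_len(16, 65535));		/* -> 4086 */
	return 0;
}
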
986a4f4d 2776static int virtnet_find_vqs(struct virtnet_info *vi)
3f9c10b0 2777{
986a4f4d
JW
2778 vq_callback_t **callbacks;
2779 struct virtqueue **vqs;
2780 int ret = -ENOMEM;
2781 int i, total_vqs;
2782 const char **names;
d45b897b 2783 bool *ctx;
986a4f4d
JW
2784
2785 /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
2786 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
2787 * possible control vq.
2788 */
2789 total_vqs = vi->max_queue_pairs * 2 +
2790 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
2791
2792 /* Allocate space for find_vqs parameters */
6396bb22 2793 vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
986a4f4d
JW
2794 if (!vqs)
2795 goto err_vq;
6da2ec56 2796 callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
986a4f4d
JW
2797 if (!callbacks)
2798 goto err_callback;
6da2ec56 2799 names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
986a4f4d
JW
2800 if (!names)
2801 goto err_names;
192f68cf 2802 if (!vi->big_packets || vi->mergeable_rx_bufs) {
6396bb22 2803 ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
d45b897b
MT
2804 if (!ctx)
2805 goto err_ctx;
2806 } else {
2807 ctx = NULL;
2808 }
986a4f4d
JW
2809
2810 /* Parameters for control virtqueue, if any */
2811 if (vi->has_cvq) {
2812 callbacks[total_vqs - 1] = NULL;
2813 names[total_vqs - 1] = "control";
2814 }
3f9c10b0 2815
986a4f4d
JW
2816 /* Allocate/initialize parameters for send/receive virtqueues */
2817 for (i = 0; i < vi->max_queue_pairs; i++) {
2818 callbacks[rxq2vq(i)] = skb_recv_done;
2819 callbacks[txq2vq(i)] = skb_xmit_done;
2820 sprintf(vi->rq[i].name, "input.%d", i);
2821 sprintf(vi->sq[i].name, "output.%d", i);
2822 names[rxq2vq(i)] = vi->rq[i].name;
2823 names[txq2vq(i)] = vi->sq[i].name;
d45b897b
MT
2824 if (ctx)
2825 ctx[rxq2vq(i)] = true;
986a4f4d 2826 }
3f9c10b0 2827
986a4f4d 2828 ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
d45b897b 2829 names, ctx, NULL);
986a4f4d
JW
2830 if (ret)
2831 goto err_find;
3f9c10b0 2832
986a4f4d
JW
2833 if (vi->has_cvq) {
2834 vi->cvq = vqs[total_vqs - 1];
3f9c10b0 2835 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
f646968f 2836 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3f9c10b0 2837 }
986a4f4d
JW
2838
2839 for (i = 0; i < vi->max_queue_pairs; i++) {
2840 vi->rq[i].vq = vqs[rxq2vq(i)];
d85b758f 2841 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
986a4f4d
JW
2842 vi->sq[i].vq = vqs[txq2vq(i)];
2843 }
2844
2fa3c8a8 2845	/* Success path: fall through with ret == 0 to free the temporary arrays below. */
986a4f4d 2846
986a4f4d
JW
2847
2848err_find:
d45b897b
MT
2849 kfree(ctx);
2850err_ctx:
986a4f4d
JW
2851 kfree(names);
2852err_names:
2853 kfree(callbacks);
2854err_callback:
2855 kfree(vqs);
2856err_vq:
2857 return ret;
2858}
2859
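
The layout comment in virtnet_find_vqs() above translates into a simple index scheme: receive and transmit queues interleave pairwise and the control vq, if any, takes the last slot. The sketch below assumes the rxq2vq()/txq2vq() helpers defined earlier in this file map queue i to 2*i and 2*i + 1; treat that mapping as an assumption here.

/*
 * Sketch of the virtqueue index layout described in virtnet_find_vqs().
 * The rxq2vq()/txq2vq() mapping mirrors the helpers assumed to exist
 * earlier in this file.
 */
#include <stdio.h>

static int rxq2vq(int q) { return q * 2; }
static int txq2vq(int q) { return q * 2 + 1; }

int main(void)
{
	int max_queue_pairs = 3;	/* illustrative */
	int has_cvq = 1;
	int total_vqs = max_queue_pairs * 2 + has_cvq;

	for (int i = 0; i < max_queue_pairs; i++)
		printf("pair %d: input.%d -> vq %d, output.%d -> vq %d\n",
		       i, i, rxq2vq(i), i, txq2vq(i));
	if (has_cvq)
		printf("control -> vq %d\n", total_vqs - 1);
	return 0;
}
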
2860static int virtnet_alloc_queues(struct virtnet_info *vi)
2861{
2862 int i;
2863
12e57169
MT
2864 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
2865 if (!vi->ctrl)
2866 goto err_ctrl;
6396bb22 2867 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
986a4f4d
JW
2868 if (!vi->sq)
2869 goto err_sq;
6396bb22 2870 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
008d4278 2871 if (!vi->rq)
986a4f4d
JW
2872 goto err_rq;
2873
2874 INIT_DELAYED_WORK(&vi->refill, refill_work);
2875 for (i = 0; i < vi->max_queue_pairs; i++) {
2876 vi->rq[i].pages = NULL;
2877 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
2878 napi_weight);
1d11e732
WB
2879 netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
2880 napi_tx ? napi_weight : 0);
986a4f4d
JW
2881
2882 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
5377d758 2883 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
986a4f4d 2884 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
d7dfc5cf
TM
2885
2886 u64_stats_init(&vi->rq[i].stats.syncp);
2887 u64_stats_init(&vi->sq[i].stats.syncp);
986a4f4d
JW
2888 }
2889
2890 return 0;
2891
2892err_rq:
2893 kfree(vi->sq);
2894err_sq:
12e57169
MT
2895 kfree(vi->ctrl);
2896err_ctrl:
986a4f4d
JW
2897 return -ENOMEM;
2898}
2899
2900static int init_vqs(struct virtnet_info *vi)
2901{
2902 int ret;
2903
2904 /* Allocate send & receive queues */
2905 ret = virtnet_alloc_queues(vi);
2906 if (ret)
2907 goto err;
2908
2909 ret = virtnet_find_vqs(vi);
2910 if (ret)
2911 goto err_free;
2912
47be2479 2913 get_online_cpus();
8898c21c 2914 virtnet_set_affinity(vi);
47be2479
WG
2915 put_online_cpus();
2916
986a4f4d
JW
2917 return 0;
2918
2919err_free:
2920 virtnet_free_queues(vi);
2921err:
2922 return ret;
3f9c10b0
AS
2923}
2924
fbf28d78
MD
2925#ifdef CONFIG_SYSFS
2926static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
718ad681 2927 char *buf)
fbf28d78
MD
2928{
2929 struct virtnet_info *vi = netdev_priv(queue->dev);
2930 unsigned int queue_index = get_netdev_rx_queue_index(queue);
3cc81a9a
JW
2931 unsigned int headroom = virtnet_get_headroom(vi);
2932 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
5377d758 2933 struct ewma_pkt_len *avg;
fbf28d78
MD
2934
2935 BUG_ON(queue_index >= vi->max_queue_pairs);
2936 avg = &vi->rq[queue_index].mrg_avg_pkt_len;
d85b758f 2937 return sprintf(buf, "%u\n",
3cc81a9a
JW
2938 get_mergeable_buf_len(&vi->rq[queue_index], avg,
2939 SKB_DATA_ALIGN(headroom + tailroom)));
fbf28d78
MD
2940}
2941
2942static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
2943 __ATTR_RO(mergeable_rx_buffer_size);
2944
2945static struct attribute *virtio_net_mrg_rx_attrs[] = {
2946 &mergeable_rx_buffer_size_attribute.attr,
2947 NULL
2948};
2949
2950static const struct attribute_group virtio_net_mrg_rx_group = {
2951 .name = "virtio_net",
2952 .attrs = virtio_net_mrg_rx_attrs
2953};
2954#endif
2955
892d6eb1
JW
2956static bool virtnet_fail_on_feature(struct virtio_device *vdev,
2957 unsigned int fbit,
2958 const char *fname, const char *dname)
2959{
2960 if (!virtio_has_feature(vdev, fbit))
2961 return false;
2962
2963 dev_err(&vdev->dev, "device advertises feature %s but not %s",
2964 fname, dname);
2965
2966 return true;
2967}
2968
2969#define VIRTNET_FAIL_ON(vdev, fbit, dbit) \
2970 virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
2971
2972static bool virtnet_validate_features(struct virtio_device *vdev)
2973{
2974 if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
2975 (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
2976 "VIRTIO_NET_F_CTRL_VQ") ||
2977 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
2978 "VIRTIO_NET_F_CTRL_VQ") ||
2979 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
2980 "VIRTIO_NET_F_CTRL_VQ") ||
2981 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
2982 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
2983 "VIRTIO_NET_F_CTRL_VQ"))) {
2984 return false;
2985 }
2986
2987 return true;
2988}
2989
d0c2c997
JW
2990#define MIN_MTU ETH_MIN_MTU
2991#define MAX_MTU ETH_MAX_MTU
2992
fe36cbe0 2993static int virtnet_validate(struct virtio_device *vdev)
296f96fc 2994{
6ba42248
MT
2995 if (!vdev->config->get) {
2996 dev_err(&vdev->dev, "%s failure: config access disabled\n",
2997 __func__);
2998 return -EINVAL;
2999 }
3000
892d6eb1
JW
3001 if (!virtnet_validate_features(vdev))
3002 return -EINVAL;
3003
fe36cbe0
MT
3004 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
3005 int mtu = virtio_cread16(vdev,
3006 offsetof(struct virtio_net_config,
3007 mtu));
3008 if (mtu < MIN_MTU)
3009 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
3010 }
3011
3012 return 0;
3013}
3014
3015static int virtnet_probe(struct virtio_device *vdev)
3016{
d7dfc5cf 3017 int i, err = -ENOMEM;
fe36cbe0
MT
3018 struct net_device *dev;
3019 struct virtnet_info *vi;
3020 u16 max_queue_pairs;
3021 int mtu;
3022
986a4f4d 3023 /* Find if host supports multiqueue virtio_net device */
855e0c52
RR
3024 err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
3025 struct virtio_net_config,
3026 max_virtqueue_pairs, &max_queue_pairs);
986a4f4d
JW
3027
 3028	/* We need at least 2 queues */
3029 if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
3030 max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
3031 !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
3032 max_queue_pairs = 1;
296f96fc
RR
3033
3034 /* Allocate ourselves a network device with room for our info */
986a4f4d 3035 dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
296f96fc
RR
3036 if (!dev)
3037 return -ENOMEM;
3038
3039 /* Set up network device as normal. */
ab5bd583
XZ
3040 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
3041 IFF_TX_SKB_NO_LINEAR;
76288b4e 3042 dev->netdev_ops = &virtnet_netdev;
296f96fc 3043 dev->features = NETIF_F_HIGHDMA;
3fa2a1df 3044
7ad24ea4 3045 dev->ethtool_ops = &virtnet_ethtool_ops;
296f96fc
RR
3046 SET_NETDEV_DEV(dev, &vdev->dev);
3047
3048 /* Do we support "hardware" checksums? */
98e778c9 3049 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
296f96fc 3050 /* This opens up the world of extra features. */
48900cb6 3051 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
98e778c9 3052 if (csum)
48900cb6 3053 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
98e778c9
MM
3054
3055 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
e078de03 3056 dev->hw_features |= NETIF_F_TSO
34a48579
RR
3057 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
3058 }
5539ae96 3059	/* Individual feature bits: what can the host handle? */
98e778c9
MM
3060 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
3061 dev->hw_features |= NETIF_F_TSO;
3062 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
3063 dev->hw_features |= NETIF_F_TSO6;
3064 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
3065 dev->hw_features |= NETIF_F_TSO_ECN;
98e778c9 3066
41f2f127
JW
3067 dev->features |= NETIF_F_GSO_ROBUST;
3068
98e778c9 3069 if (gso)
e078de03 3070 dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
98e778c9 3071 /* (!csum && gso) case will be fixed by register_netdev() */
296f96fc 3072 }
4f49129b
TH
3073 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
3074 dev->features |= NETIF_F_RXCSUM;
a02e8964
WB
3075 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3076 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
3077 dev->features |= NETIF_F_LRO;
cf8691cb 3078 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
a02e8964 3079 dev->hw_features |= NETIF_F_LRO;
296f96fc 3080
4fda8302
JW
3081 dev->vlan_features = dev->features;
3082
d0c2c997
JW
3083 /* MTU range: 68 - 65535 */
3084 dev->min_mtu = MIN_MTU;
3085 dev->max_mtu = MAX_MTU;
3086
296f96fc 3087 /* Configuration may specify what MAC to use. Otherwise random. */
855e0c52
RR
3088 if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
3089 virtio_cread_bytes(vdev,
3090 offsetof(struct virtio_net_config, mac),
3091 dev->dev_addr, dev->addr_len);
3092 else
f2cedb63 3093 eth_hw_addr_random(dev);
296f96fc
RR
3094
3095 /* Set up our device-specific information */
3096 vi = netdev_priv(dev);
296f96fc
RR
3097 vi->dev = dev;
3098 vi->vdev = vdev;
d9d5dcc8 3099 vdev->priv = vi;
827da44c 3100
586d17c5 3101 INIT_WORK(&vi->config_work, virtnet_config_changed_work);
296f96fc 3102
97402b96 3103 /* If we can receive ANY GSO packets, we must allocate large ones. */
8e95a202
JP
3104 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3105 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
e3e3c423
VY
3106 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
3107 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
97402b96
HX
3108 vi->big_packets = true;
3109
3f2c31d9
MM
3110 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
3111 vi->mergeable_rx_bufs = true;
3112
d04302b3
MT
3113 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
3114 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
012873d0
MT
3115 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
3116 else
3117 vi->hdr_len = sizeof(struct virtio_net_hdr);
3118
75993300
MT
3119 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
3120 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
e7428e95
MT
3121 vi->any_header_sg = true;
3122
986a4f4d
JW
3123 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
3124 vi->has_cvq = true;
3125
14de9d11
AC
3126 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
3127 mtu = virtio_cread16(vdev,
3128 offsetof(struct virtio_net_config,
3129 mtu));
93a205ee 3130 if (mtu < dev->min_mtu) {
fe36cbe0
MT
3131 /* Should never trigger: MTU was previously validated
3132 * in virtnet_validate.
3133 */
7934b481
YS
3134 dev_err(&vdev->dev,
3135 "device MTU appears to have changed it is now %d < %d",
3136 mtu, dev->min_mtu);
411ea23a 3137 err = -EINVAL;
d7dfc5cf 3138 goto free;
93a205ee 3139 }
2e123b44 3140
fe36cbe0
MT
3141 dev->mtu = mtu;
3142 dev->max_mtu = mtu;
3143
2e123b44
MT
3144 /* TODO: size buffers correctly in this case. */
3145 if (dev->mtu > ETH_DATA_LEN)
3146 vi->big_packets = true;
14de9d11
AC
3147 }
3148
012873d0
MT
3149 if (vi->any_header_sg)
3150 dev->needed_headroom = vi->hdr_len;
6ebbc1a6 3151
44900010
JW
3152 /* Enable multiqueue by default */
3153 if (num_online_cpus() >= max_queue_pairs)
3154 vi->curr_queue_pairs = max_queue_pairs;
3155 else
3156 vi->curr_queue_pairs = num_online_cpus();
986a4f4d
JW
3157 vi->max_queue_pairs = max_queue_pairs;
3158
3159 /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
3f9c10b0 3160 err = init_vqs(vi);
d2a7ddda 3161 if (err)
d7dfc5cf 3162 goto free;
296f96fc 3163
fbf28d78
MD
3164#ifdef CONFIG_SYSFS
3165 if (vi->mergeable_rx_bufs)
3166 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
3167#endif
0f13b66b
ZYW
3168 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
3169 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
986a4f4d 3170
16032be5
NA
3171 virtnet_init_settings(dev);
3172
ba5e4426
SS
3173 if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
3174 vi->failover = net_failover_create(vi->dev);
4b8e6ac4
WY
3175 if (IS_ERR(vi->failover)) {
3176 err = PTR_ERR(vi->failover);
ba5e4426 3177 goto free_vqs;
4b8e6ac4 3178 }
ba5e4426
SS
3179 }
3180
296f96fc
RR
3181 err = register_netdev(dev);
3182 if (err) {
3183 pr_debug("virtio_net: registering device failed\n");
ba5e4426 3184 goto free_failover;
296f96fc 3185 }
b3369c1f 3186
4baf1e33
MT
3187 virtio_device_ready(vdev);
3188
8017c279 3189 err = virtnet_cpu_notif_add(vi);
8de4b2f3
WG
3190 if (err) {
3191 pr_debug("virtio_net: registering cpu notifier failed\n");
f00e35e2 3192 goto free_unregister_netdev;
8de4b2f3
WG
3193 }
3194
a220871b 3195 virtnet_set_queues(vi, vi->curr_queue_pairs);
44900010 3196
167c25e4
JW
 3197	/* Assume link up if the device can't report link status;
 3198	 * otherwise get link status from config. */
bda7fab5 3199 netif_carrier_off(dev);
167c25e4 3200 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
3b07e9ca 3201 schedule_work(&vi->config_work);
167c25e4
JW
3202 } else {
3203 vi->status = VIRTIO_NET_S_LINK_UP;
faa9b39f 3204 virtnet_update_settings(vi);
167c25e4
JW
3205 netif_carrier_on(dev);
3206 }
9f4d26d0 3207
3f93522f
JW
3208 for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
3209 if (virtio_has_feature(vi->vdev, guest_offloads[i]))
3210 set_bit(guest_offloads[i], &vi->guest_offloads);
a02e8964 3211 vi->guest_offloads_capable = vi->guest_offloads;
3f93522f 3212
986a4f4d
JW
3213 pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
3214 dev->name, max_queue_pairs);
3215
296f96fc
RR
3216 return 0;
3217
f00e35e2 3218free_unregister_netdev:
02465555
MT
3219 vi->vdev->config->reset(vdev);
3220
b3369c1f 3221 unregister_netdev(dev);
ba5e4426
SS
3222free_failover:
3223 net_failover_destroy(vi->failover);
d2a7ddda 3224free_vqs:
986a4f4d 3225 cancel_delayed_work_sync(&vi->refill);
fb51879d 3226 free_receive_page_frags(vi);
e9d7417b 3227 virtnet_del_vqs(vi);
296f96fc
RR
3228free:
3229 free_netdev(dev);
3230 return err;
3231}
3232
04486ed0 3233static void remove_vq_common(struct virtnet_info *vi)
296f96fc 3234{
04486ed0 3235 vi->vdev->config->reset(vi->vdev);
830a8a97
SM
3236
3237 /* Free unused buffers in both send and recv, if any. */
9ab86bbc 3238 free_unused_bufs(vi);
fb6813f4 3239
986a4f4d 3240 free_receive_bufs(vi);
d2a7ddda 3241
fb51879d
MD
3242 free_receive_page_frags(vi);
3243
986a4f4d 3244 virtnet_del_vqs(vi);
04486ed0
AS
3245}
3246
8cc085d6 3247static void virtnet_remove(struct virtio_device *vdev)
04486ed0
AS
3248{
3249 struct virtnet_info *vi = vdev->priv;
3250
8017c279 3251 virtnet_cpu_notif_remove(vi);
8de4b2f3 3252
102a2786
MT
3253 /* Make sure no work handler is accessing the device. */
3254 flush_work(&vi->config_work);
586d17c5 3255
04486ed0
AS
3256 unregister_netdev(vi->dev);
3257
ba5e4426
SS
3258 net_failover_destroy(vi->failover);
3259
04486ed0 3260 remove_vq_common(vi);
fb6813f4 3261
74b2553f 3262 free_netdev(vi->dev);
296f96fc
RR
3263}
3264
67a75194 3265static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
0741bcb5
AS
3266{
3267 struct virtnet_info *vi = vdev->priv;
3268
8017c279 3269 virtnet_cpu_notif_remove(vi);
9fe7bfce 3270 virtnet_freeze_down(vdev);
0741bcb5
AS
3271 remove_vq_common(vi);
3272
3273 return 0;
3274}
3275
67a75194 3276static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
0741bcb5
AS
3277{
3278 struct virtnet_info *vi = vdev->priv;
9fe7bfce 3279 int err;
0741bcb5 3280
9fe7bfce 3281 err = virtnet_restore_up(vdev);
0741bcb5
AS
3282 if (err)
3283 return err;
986a4f4d
JW
3284 virtnet_set_queues(vi, vi->curr_queue_pairs);
3285
8017c279 3286 err = virtnet_cpu_notif_add(vi);
ec9debbd
JW
3287 if (err)
3288 return err;
3289
0741bcb5
AS
3290 return 0;
3291}
0741bcb5 3292
296f96fc
RR
3293static struct virtio_device_id id_table[] = {
3294 { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
3295 { 0 },
3296};
3297
f3358507
MT
3298#define VIRTNET_FEATURES \
3299 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
3300 VIRTIO_NET_F_MAC, \
3301 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
3302 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
3303 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
3304 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
3305 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
3306 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
3307 VIRTIO_NET_F_CTRL_MAC_ADDR, \
faa9b39f 3308 VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
9805069d 3309 VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY
f3358507 3310
c45a6816 3311static unsigned int features[] = {
f3358507
MT
3312 VIRTNET_FEATURES,
3313};
3314
3315static unsigned int features_legacy[] = {
3316 VIRTNET_FEATURES,
3317 VIRTIO_NET_F_GSO,
e7428e95 3318 VIRTIO_F_ANY_LAYOUT,
c45a6816
RR
3319};
3320
22402529 3321static struct virtio_driver virtio_net_driver = {
c45a6816
RR
3322 .feature_table = features,
3323 .feature_table_size = ARRAY_SIZE(features),
f3358507
MT
3324 .feature_table_legacy = features_legacy,
3325 .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
296f96fc
RR
3326 .driver.name = KBUILD_MODNAME,
3327 .driver.owner = THIS_MODULE,
3328 .id_table = id_table,
fe36cbe0 3329 .validate = virtnet_validate,
296f96fc 3330 .probe = virtnet_probe,
8cc085d6 3331 .remove = virtnet_remove,
9f4d26d0 3332 .config_changed = virtnet_config_changed,
89107000 3333#ifdef CONFIG_PM_SLEEP
0741bcb5
AS
3334 .freeze = virtnet_freeze,
3335 .restore = virtnet_restore,
3336#endif
296f96fc
RR
3337};
3338
8017c279
SAS
3339static __init int virtio_net_driver_init(void)
3340{
3341 int ret;
3342
73c1b41e 3343 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
8017c279
SAS
3344 virtnet_cpu_online,
3345 virtnet_cpu_down_prep);
3346 if (ret < 0)
3347 goto out;
3348 virtionet_online = ret;
73c1b41e 3349 ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
8017c279
SAS
3350 NULL, virtnet_cpu_dead);
3351 if (ret)
3352 goto err_dead;
3353
3354 ret = register_virtio_driver(&virtio_net_driver);
3355 if (ret)
3356 goto err_virtio;
3357 return 0;
3358err_virtio:
3359 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
3360err_dead:
3361 cpuhp_remove_multi_state(virtionet_online);
3362out:
3363 return ret;
3364}
3365module_init(virtio_net_driver_init);
3366
3367static __exit void virtio_net_driver_exit(void)
3368{
cfa0ebc9 3369 unregister_virtio_driver(&virtio_net_driver);
8017c279
SAS
3370 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
3371 cpuhp_remove_multi_state(virtionet_online);
8017c279
SAS
3372}
3373module_exit(virtio_net_driver_exit);
296f96fc
RR
3374
3375MODULE_DEVICE_TABLE(virtio, id_table);
3376MODULE_DESCRIPTION("Virtio network driver");
3377MODULE_LICENSE("GPL");