// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
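/* GOOD_PACKET_LEN = 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 1500 (ETH_DATA_LEN) = 1518 bytes */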
#define GOOD_COPY_LEN	128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX		BIT(0)
#define VIRTIO_XDP_REDIR	BIT(1)

#define VIRTIO_XDP_FLAG	BIT(0)

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)
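/* Assuming DECLARE_EWMA(name, precision, weight_rcp) semantics, a weight
 * reciprocal of 64 means each sample moves the average by 1/64 of the
 * difference: avg = avg * 63/64 + sample * 1/64.
 */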

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GUEST_USO4,
	VIRTIO_NET_F_GUEST_USO6,
	VIRTIO_NET_F_GUEST_HDRLEN
};

#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				   (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				   (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
				   (1ULL << VIRTIO_NET_F_GUEST_UFO)  | \
				   (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
				   (1ULL << VIRTIO_NET_F_GUEST_USO6))

struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 xdp_tx;
	u64 xdp_tx_drops;
	u64 kicks;
	u64 tx_timeouts;
};

struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 xdp_packets;
	u64 xdp_tx;
	u64 xdp_redirects;
	u64 xdp_drops;
	u64 kicks;
};

#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	{ "packets",		VIRTNET_SQ_STAT(packets) },
	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
	{ "xdp_tx",		VIRTNET_SQ_STAT(xdp_tx) },
	{ "xdp_tx_drops",	VIRTNET_SQ_STAT(xdp_tx_drops) },
	{ "kicks",		VIRTNET_SQ_STAT(kicks) },
	{ "tx_timeouts",	VIRTNET_SQ_STAT(tx_timeouts) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	{ "packets",		VIRTNET_RQ_STAT(packets) },
	{ "bytes",		VIRTNET_RQ_STAT(bytes) },
	{ "drops",		VIRTNET_RQ_STAT(drops) },
	{ "xdp_packets",	VIRTNET_RQ_STAT(xdp_packets) },
	{ "xdp_tx",		VIRTNET_RQ_STAT(xdp_tx) },
	{ "xdp_redirects",	VIRTNET_RQ_STAT(xdp_redirects) },
	{ "xdp_drops",		VIRTNET_RQ_STAT(xdp_drops) },
	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
};

#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send_queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[16];

	struct virtnet_sq_stats stats;

	struct napi_struct napi;

	/* Record whether sq is in reset state. */
	bool reset;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	struct virtnet_rq_stats stats;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[16];

	struct xdp_rxq_info xdp_rxq;
};

/* This structure can hold the RSS message with maximum settings for the
 * indirection table and key size.
 * Note that the default structure describing the RSS configuration
 * (virtio_net_rss_config) contains the same information but cannot hold
 * the table values.
 * In any case, the structure is passed to the virtio hw through sg_buf,
 * split into parts, because table sizes may differ according to the
 * device configuration.
 */
#define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
#define VIRTIO_NET_RSS_MAX_TABLE_LEN    128
struct virtio_net_ctrl_rss {
	u32 hash_types;
	u16 indirection_table_mask;
	u16 unclassified_queue;
	u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
	u16 max_tx_vq;
	u8 hash_key_length;
	u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
};

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	struct virtio_net_ctrl_mq mq;
	u8 promisc;
	u8 allmulti;
	__virtio16 vid;
	__virtio64 offloads;
	struct virtio_net_ctrl_rss rss;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* xdp_queue_pairs may be 0 when xdp is already loaded, so track it here. */
	bool xdp_enabled;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* number of sg entries allocated for big packets */
	unsigned int big_packets_num_skbfrags;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Host supports rss and/or hash report */
	bool has_rss;
	bool has_rss_hash_report;
	u8 rss_key_size;
	u16 rss_indir_table_size;
	u32 rss_hash_types_supported;
	u32 rss_hash_types_saved;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for delayed refilling if we run low on memory. */
	struct delayed_work refill;

	/* Is delayed refill enabled? */
	bool refill_enabled;

	/* The lock to synchronize the access to refill_enabled */
	spinlock_t refill_lock;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is the affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	/* Interrupt coalescing settings */
	u32 tx_usecs;
	u32 rx_usecs;
	u32 tx_max_packets;
	u32 rx_max_packets;

	unsigned long guest_offloads;
	unsigned long guest_offloads_capable;

	/* failover when STANDBY feature enabled */
	struct failover *failover;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_v1_hash hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[12];
};

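/* Sketch of the arithmetic above, assuming a 20-byte
 * virtio_net_hdr_v1_hash: 20 bytes of header + 12 bytes of padding = 32,
 * so the data sg that follows in the same page is 16-byte aligned.
 * A compile-time check of that assumption:
 */
static_assert(sizeof(struct padded_vnet_hdr) % 16 == 0);
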
static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);

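/* The tx virtqueue completes both skbs and xdp_frames. To tell them apart
 * without extra per-buffer state, xdp_frame pointers are stored with their
 * low bit set (VIRTIO_XDP_FLAG): frames are at least word-aligned, so that
 * bit is otherwise always zero.
 */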
static bool is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
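/* For example, with 2 queue pairs the ring layout is
 * vq0 = rx0, vq1 = tx0, vq2 = rx1, vq3 = tx1, vq4 = cvq,
 * which the helpers below convert in both directions.
 */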
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse.
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void enable_delayed_refill(struct virtnet_info *vi)
{
	spin_lock_bh(&vi->refill_lock);
	vi->refill_enabled = true;
	spin_unlock_bh(&vi->refill_lock);
}

static void disable_delayed_refill(struct virtnet_info *vi)
{
	spin_lock_bh(&vi->refill_lock);
	vi->refill_enabled = false;
	spin_unlock_bh(&vi->refill_lock);
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

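/* Complete napi and re-enable virtqueue callbacks. If new buffers arrived
 * between the final poll and callback re-enable (virtqueue_poll() returns
 * true for the saved opaque snapshot), reschedule napi rather than risk
 * missing them.
 */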
static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		virtqueue_disable_cb(vq);
	}
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}

#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}

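/* The helpers above pack two values into one pointer-sized context:
 * bits [21:0] hold the buffer truesize and the bits from bit 22 up hold
 * the headroom. For example, truesize 1536 with headroom 256 is stored
 * as (256 << 22) | 1536.
 */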
/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize,
				   unsigned int headroom)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	struct page *page_to_free = NULL;
	int tailroom, shinfo_size;
	char *p, *hdr_p, *buf;

	p = page_address(page) + offset;
	hdr_p = p;

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = hdr_len;
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	buf = p - headroom;
	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;
	tailroom = truesize - headroom - hdr_padded_len - len;

	shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Packet is large enough and has room for the shared info: build
	 * the skb around the existing buffer instead of copying.
	 */
	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
		skb = build_skb(buf, truesize);
		if (unlikely(!skb))
			return NULL;

		skb_reserve(skb, p - buf);
		skb_put(skb, len);

		page = (struct page *)page->private;
		if (page)
			give_pages(rq, page);
		goto ok;
	}

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	/* Copy all frame if it fits skb->head, otherwise
	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
	 */
	if (len <= skb_tailroom(skb))
		copy = len;
	else
		copy = ETH_HLEN;
	skb_put_data(skb, p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			page_to_free = page;
		goto ok;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

ok:
	hdr = skb_vnet_hdr(skb);
	memcpy(hdr, hdr_p, hdr_len);
	if (page_to_free)
		put_page(page_to_free);

	return skb;
}

static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
	unsigned int len;
	unsigned int packets = 0;
	unsigned int bytes = 0;
	void *ptr;

	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(!is_xdp_frame(ptr))) {
			struct sk_buff *skb = ptr;

			pr_debug("Sent skb %p\n", skb);

			bytes += skb->len;
			napi_consume_skb(skb, in_napi);
		} else {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			bytes += xdp_get_frame_len(frame);
			xdp_return_frame(frame);
		}
		packets++;
	}

	/* Avoid overhead when no packets have been processed; this
	 * happens when called speculatively from start_xmit.
	 */
	if (!packets)
		return;

	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += bytes;
	sq->stats.packets += packets;
	u64_stats_update_end(&sq->stats.syncp);
}

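/* XDP-only tx queues are taken from the tail of the active range: queues
 * [curr_queue_pairs - xdp_queue_pairs, curr_queue_pairs) carry raw xdp
 * buffers rather than skbs, which is what the check below encodes.
 */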
static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
		return false;
	else if (q < vi->curr_queue_pairs)
		return true;
	else
		return false;
}

static void check_sq_full_and_disable(struct virtnet_info *vi,
				      struct net_device *dev,
				      struct send_queue *sq)
{
	bool use_napi = sq->napi.weight;
	int qnum;

	qnum = sq - vi->sq;

	/* If running out of space, stop queue to avoid getting packets that we
	 * are then unable to transmit.
	 * An alternative would be to force queuing layer to requeue the skb by
	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
	 * returned in a normal path of operation: it means that driver is not
	 * maintaining the TX queue stop/start state properly, and causes
	 * the stack to do a non-trivial amount of useless work.
	 * Since most packets only take 1 or 2 ring slots, stopping the queue
	 * early means 16 slots are typically wasted.
	 */
	if (sq->vq->num_free < 2 + MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (use_napi) {
			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
				virtqueue_napi_schedule(&sq->napi, sq->vq);
		} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq, false);
			if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}
}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				  struct send_queue *sq,
				  struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	struct skb_shared_info *shinfo;
	u8 nr_frags = 0;
	int err, i;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		shinfo = xdp_get_shared_info_from_frame(xdpf);
		nr_frags = shinfo->nr_frags;
	}

	/* The wrapping function virtnet_xdp_xmit() frees up the pending
	 * old buffers, and there xdp_get_frame_len() and xdp_return_frame()
	 * locate skb_shared_info via xdpf->data and xdpf->headroom.
	 * Therefore, keep the headroom value in sync here.
	 */
	xdpf->headroom -= vi->hdr_len;
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len += vi->hdr_len;

	sg_init_table(sq->sg, nr_frags + 1);
	sg_set_buf(sq->sg, xdpf->data, xdpf->len);
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &shinfo->frags[i];

		sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
	}

	err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
				   xdp_to_ptr(xdpf), GFP_ATOMIC);
	if (unlikely(err))
		return -ENOSPC; /* Caller handles free/refcnt */

	return 0;
}

/* When vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
 * the current cpu, so it does not need to be locked.
 *
 * Here we use a macro instead of inline functions because we have to deal with
 * three issues at the same time: 1. the choice of sq. 2. judge and execute the
 * lock/unlock of txq 3. make sparse happy. It is difficult for two inline
 * functions to perfectly solve these three problems at the same time.
 */
#define virtnet_xdp_get_sq(vi) ({					\
	int cpu = smp_processor_id();					\
	struct netdev_queue *txq;					\
	typeof(vi) v = (vi);						\
	unsigned int qp;						\
									\
	if (v->curr_queue_pairs > nr_cpu_ids) {				\
		qp = v->curr_queue_pairs - v->xdp_queue_pairs;		\
		qp += cpu;						\
		txq = netdev_get_tx_queue(v->dev, qp);			\
		__netif_tx_acquire(txq);				\
	} else {							\
		qp = cpu % v->curr_queue_pairs;				\
		txq = netdev_get_tx_queue(v->dev, qp);			\
		__netif_tx_lock(txq, cpu);				\
	}								\
	v->sq + qp;							\
})

#define virtnet_xdp_put_sq(vi, q) {					\
	struct netdev_queue *txq;					\
	typeof(vi) v = (vi);						\
									\
	txq = netdev_get_tx_queue(v->dev, (q) - v->sq);			\
	if (v->curr_queue_pairs > nr_cpu_ids)				\
		__netif_tx_release(txq);				\
	else								\
		__netif_tx_unlock(txq);					\
}

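/* Typical usage, as in virtnet_xdp_xmit() below: every
 * virtnet_xdp_get_sq() must be paired with a virtnet_xdp_put_sq() on the
 * same path:
 *
 *	sq = virtnet_xdp_get_sq(vi);
 *	... queue xdp_frames on sq ...
 *	virtnet_xdp_put_sq(vi, sq);
 */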
static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames, u32 flags)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct receive_queue *rq = vi->rq;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
	unsigned int len;
	int packets = 0;
	int bytes = 0;
	int nxmit = 0;
	int kicks = 0;
	void *ptr;
	int ret;
	int i;

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicates XDP resources have been successfully allocated.
	 */
	xdp_prog = rcu_access_pointer(rq->xdp_prog);
	if (!xdp_prog)
		return -ENXIO;

	sq = virtnet_xdp_get_sq(vi);

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
		ret = -EINVAL;
		goto out;
	}

	/* Free up any pending old buffers before queueing new ones. */
	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(is_xdp_frame(ptr))) {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			bytes += xdp_get_frame_len(frame);
			xdp_return_frame(frame);
		} else {
			struct sk_buff *skb = ptr;

			bytes += skb->len;
			napi_consume_skb(skb, false);
		}
		packets++;
	}

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
			break;
		nxmit++;
	}
	ret = nxmit;

	if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
		check_sq_full_and_disable(vi, dev, sq);

	if (flags & XDP_XMIT_FLUSH) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
			kicks = 1;
	}
out:
	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += bytes;
	sq->stats.packets += packets;
	sq->stats.xdp_tx += n;
	sq->stats.xdp_tx_drops += n - nxmit;
	sq->stats.kicks += kicks;
	u64_stats_update_end(&sq->stats.syncp);

	virtnet_xdp_put_sq(vi, sq);
	return ret;
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
}

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that
 * we hit right after XDP is enabled and until queue is refilled
 * with large buffers with sufficient headroom - so it should affect
 * at most queue size packets.
 * Afterwards, the conditions to enable
 * XDP should preclude the underlying device from sending packets
 * across multiple buffers (num_buf > 1), and we make sure buffers
 * have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       int *num_buf,
				       struct page *p,
				       int offset,
				       int page_off,
				       unsigned int *len)
{
	int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct page *page;

	if (page_off + *len + tailroom > PAGE_SIZE)
		return NULL;

	page = alloc_page(GFP_ATOMIC);
	if (!page)
		return NULL;

	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--*num_buf) {
		unsigned int buflen;
		void *buf;
		int off;

		buf = virtqueue_get_buf(rq->vq, &buflen);
		if (unlikely(!buf))
			goto err_buf;

		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packet larger than the MTU.
		 */
		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
			put_page(p);
			goto err_buf;
		}

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	/* Headroom does not contribute to packet length */
	*len = page_off - VIRTIO_XDP_HEADROOM;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}

static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, void *ctx,
				     unsigned int len,
				     unsigned int *xdp_xmit,
				     struct virtnet_rq_stats *stats)
{
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
	unsigned int xdp_headroom = (unsigned long)ctx;
	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
	unsigned int headroom = vi->hdr_len + header_offset;
	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct page *page = virt_to_head_page(buf);
	unsigned int delta = 0;
	struct page *xdp_page;
	int err;
	unsigned int metasize = 0;

	len -= vi->hdr_len;
	stats->bytes += len;

	if (unlikely(len > GOOD_PACKET_LEN)) {
		pr_debug("%s: rx error: len %u exceeds max size %d\n",
			 dev->name, len, GOOD_PACKET_LEN);
		dev->stats.rx_length_errors++;
		goto err;
	}

	if (likely(!vi->xdp_enabled)) {
		xdp_prog = NULL;
		goto skip_xdp;
	}

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
		struct xdp_frame *xdpf;
		struct xdp_buff xdp;
		void *orig_data;
		u32 act;

		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
			int offset = buf - page_address(page) + header_offset;
			unsigned int tlen = len + vi->hdr_len;
			int num_buf = 1;

			xdp_headroom = virtnet_get_headroom(vi);
			header_offset = VIRTNET_RX_PAD + xdp_headroom;
			headroom = vi->hdr_len + header_offset;
			buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			xdp_page = xdp_linearize_page(rq, &num_buf, page,
						      offset, header_offset,
						      &tlen);
			if (!xdp_page)
				goto err_xdp;

			buf = page_address(xdp_page);
			put_page(page);
			page = xdp_page;
		}

		xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
		xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
				 xdp_headroom, len, true);
		orig_data = xdp.data;
		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		stats->xdp_packets++;

		switch (act) {
		case XDP_PASS:
			/* Recalculate length in case bpf program changed it */
			delta = orig_data - xdp.data;
			len = xdp.data_end - xdp.data;
			metasize = xdp.data - xdp.data_meta;
			break;
		case XDP_TX:
			stats->xdp_tx++;
			xdpf = xdp_convert_buff_to_frame(&xdp);
			if (unlikely(!xdpf))
				goto err_xdp;
			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
			if (unlikely(!err)) {
				xdp_return_frame_rx_napi(xdpf);
			} else if (unlikely(err < 0)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_TX;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			stats->xdp_redirects++;
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err)
				goto err_xdp;
			*xdp_xmit |= VIRTIO_XDP_REDIR;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
			goto err_xdp;
		case XDP_DROP:
			goto err_xdp;
		}
	}
	rcu_read_unlock();

skip_xdp:
	skb = build_skb(buf, buflen);
	if (!skb)
		goto err;
	skb_reserve(skb, headroom - delta);
	skb_put(skb, len);
	if (!xdp_prog) {
		buf += header_offset;
		memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
	} /* keep zeroed vnet hdr since XDP is loaded */

	if (metasize)
		skb_metadata_set(skb, metasize);

	return skb;

err_xdp:
	rcu_read_unlock();
	stats->xdp_drops++;
err:
	stats->drops++;
	put_page(page);
xdp_xmit:
	return NULL;
}

static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len,
				   struct virtnet_rq_stats *stats)
{
	struct page *page = buf;
	struct sk_buff *skb =
		page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);

	stats->bytes += len - vi->hdr_len;
	if (unlikely(!skb))
		goto err;

	return skb;

err:
	stats->drops++;
	give_pages(rq, page);
	return NULL;
}

/* Why not use xdp_build_skb_from_frame() ?
 * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
 * virtio-net there are 2 points that do not match its requirements:
 * 1. The size of the prefilled buffer is not fixed before xdp is set.
 * 2. xdp_build_skb_from_frame() does more checks that we don't need,
 *    like eth_type_trans() (which virtio-net does in receive_buf()).
 */
static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
					       struct virtnet_info *vi,
					       struct xdp_buff *xdp,
					       unsigned int xdp_frags_truesz)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	unsigned int headroom, data_len;
	struct sk_buff *skb;
	int metasize;
	u8 nr_frags;

	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
		pr_debug("Error building skb as missing reserved tailroom for xdp");
		return NULL;
	}

	if (unlikely(xdp_buff_has_frags(xdp)))
		nr_frags = sinfo->nr_frags;

	skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
	if (unlikely(!skb))
		return NULL;

	headroom = xdp->data - xdp->data_hard_start;
	data_len = xdp->data_end - xdp->data;
	skb_reserve(skb, headroom);
	__skb_put(skb, data_len);

	metasize = xdp->data - xdp->data_meta;
	metasize = metasize > 0 ? metasize : 0;
	if (metasize)
		skb_metadata_set(skb, metasize);

	if (unlikely(xdp_buff_has_frags(xdp)))
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size,
					   xdp_frags_truesz,
					   xdp_buff_is_frag_pfmemalloc(xdp));

	return skb;
}

/* TODO: build xdp in big mode */
static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
				      struct virtnet_info *vi,
				      struct receive_queue *rq,
				      struct xdp_buff *xdp,
				      void *buf,
				      unsigned int len,
				      unsigned int frame_sz,
				      int *num_buf,
				      unsigned int *xdp_frags_truesize,
				      struct virtnet_rq_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	unsigned int headroom, tailroom, room;
	unsigned int truesize, cur_frag_size;
	struct skb_shared_info *shinfo;
	unsigned int xdp_frags_truesz = 0;
	struct page *page;
	skb_frag_t *frag;
	int offset;
	void *ctx;

	xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
	xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
			 VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);

	if (!*num_buf)
		return 0;

	if (*num_buf > 1) {
		/* If we want to build multi-buffer xdp, we need
		 * to specify that the flags of xdp_buff have the
		 * XDP_FLAGS_HAS_FRAG bit.
		 */
		if (!xdp_buff_has_frags(xdp))
			xdp_buff_set_frags_flag(xdp);

		shinfo = xdp_get_shared_info_from_buff(xdp);
		shinfo->nr_frags = 0;
		shinfo->xdp_frags_size = 0;
	}

	if (*num_buf > MAX_SKB_FRAGS + 1)
		return -EINVAL;

	while (--*num_buf > 0) {
		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, *num_buf,
				 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
			dev->stats.rx_length_errors++;
			return -EINVAL;
		}

		stats->bytes += len;
		page = virt_to_head_page(buf);
		offset = buf - page_address(page);

		truesize = mergeable_ctx_to_truesize(ctx);
		headroom = mergeable_ctx_to_headroom(ctx);
		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
		room = SKB_DATA_ALIGN(headroom + tailroom);

		cur_frag_size = truesize;
		xdp_frags_truesz += cur_frag_size;
		if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
			put_page(page);
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)(truesize - room));
			dev->stats.rx_length_errors++;
			return -EINVAL;
		}

		frag = &shinfo->frags[shinfo->nr_frags++];
		__skb_frag_set_page(frag, page);
		skb_frag_off_set(frag, offset);
		skb_frag_size_set(frag, len);
		if (page_is_pfmemalloc(page))
			xdp_buff_set_frag_pfmemalloc(xdp);

		shinfo->xdp_frags_size += len;
	}

	*xdp_frags_truesize = xdp_frags_truesz;
	return 0;
}

static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 void *buf,
					 void *ctx,
					 unsigned int len,
					 unsigned int *xdp_xmit,
					 struct virtnet_rq_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	struct bpf_prog *xdp_prog;
	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
	unsigned int frame_sz, xdp_room;
	int err;

	head_skb = NULL;
	stats->bytes += len - vi->hdr_len;

	if (unlikely(len > truesize - room)) {
		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
			 dev->name, len, (unsigned long)(truesize - room));
		dev->stats.rx_length_errors++;
		goto err_skb;
	}

	if (likely(!vi->xdp_enabled)) {
		xdp_prog = NULL;
		goto skip_xdp;
	}

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		unsigned int xdp_frags_truesz = 0;
		struct skb_shared_info *shinfo;
		struct xdp_frame *xdpf;
		struct page *xdp_page;
		struct xdp_buff xdp;
		void *data;
		u32 act;
		int i;

		/* Transient failure which in theory could occur if
		 * in-flight packets from before XDP was enabled reach
		 * the receive path after XDP is loaded.
		 */
		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		/* Now XDP core assumes frag size is PAGE_SIZE, but buffers
		 * with headroom may add hole in truesize, which
		 * makes their length exceed PAGE_SIZE. So we disable the
		 * hole mechanism for xdp. See add_recvbuf_mergeable().
		 */
		frame_sz = truesize;

		/* This happens when headroom is not enough because
		 * the buffer was prefilled before XDP was set.
		 * This should only happen for the first several packets.
		 * In fact, vq reset can be used here to help us clean up
		 * the prefilled buffers, but many existing devices do not
		 * support it, and we don't want to bother users who are
		 * using xdp normally.
		 */
		if (!xdp_prog->aux->xdp_has_frags &&
		    (num_buf > 1 || headroom < virtnet_get_headroom(vi))) {
			/* linearize data for XDP */
			xdp_page = xdp_linearize_page(rq, &num_buf,
						      page, offset,
						      VIRTIO_XDP_HEADROOM,
						      &len);
			frame_sz = PAGE_SIZE;

			if (!xdp_page)
				goto err_xdp;
			offset = VIRTIO_XDP_HEADROOM;
		} else if (unlikely(headroom < virtnet_get_headroom(vi))) {
			xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
						  sizeof(struct skb_shared_info));
			if (len + xdp_room > PAGE_SIZE)
				goto err_xdp;

			xdp_page = alloc_page(GFP_ATOMIC);
			if (!xdp_page)
				goto err_xdp;

			memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
			       page_address(page) + offset, len);
			frame_sz = PAGE_SIZE;
			offset = VIRTIO_XDP_HEADROOM;
		} else {
			xdp_page = page;
		}

		data = page_address(xdp_page) + offset;
		err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
						 &num_buf, &xdp_frags_truesz, stats);
		if (unlikely(err))
			goto err_xdp_frags;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		stats->xdp_packets++;

		switch (act) {
		case XDP_PASS:
			head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
			if (unlikely(!head_skb))
				goto err_xdp_frags;

			if (unlikely(xdp_page != page))
				put_page(page);
			rcu_read_unlock();
			return head_skb;
		case XDP_TX:
			stats->xdp_tx++;
			xdpf = xdp_convert_buff_to_frame(&xdp);
			if (unlikely(!xdpf)) {
				netdev_dbg(dev, "convert buff to frame failed for xdp\n");
				goto err_xdp_frags;
			}
			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
			if (unlikely(!err)) {
				xdp_return_frame_rx_napi(xdpf);
			} else if (unlikely(err < 0)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				goto err_xdp_frags;
			}
			*xdp_xmit |= VIRTIO_XDP_TX;
			if (unlikely(xdp_page != page))
				put_page(page);
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			stats->xdp_redirects++;
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err)
				goto err_xdp_frags;
			*xdp_xmit |= VIRTIO_XDP_REDIR;
			if (unlikely(xdp_page != page))
				put_page(page);
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			goto err_xdp_frags;
		}
err_xdp_frags:
		if (unlikely(xdp_page != page))
			__free_pages(xdp_page, 0);

		if (xdp_buff_has_frags(&xdp)) {
			shinfo = xdp_get_shared_info_from_buff(&xdp);
			for (i = 0; i < shinfo->nr_frags; i++) {
				xdp_page = skb_frag_page(&shinfo->frags[i]);
				put_page(xdp_page);
			}
		}

		goto err_xdp;
	}
	rcu_read_unlock();

skip_xdp:
	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
	curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;
	while (--num_buf) {
		int num_skb_frags;

		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf,
				 virtio16_to_cpu(vi->vdev,
						 hdr->num_buffers));
			dev->stats.rx_length_errors++;
			goto err_skb;
		}

		stats->bytes += len;
		page = virt_to_head_page(buf);

		truesize = mergeable_ctx_to_truesize(ctx);
		headroom = mergeable_ctx_to_headroom(ctx);
		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
		room = SKB_DATA_ALIGN(headroom + tailroom);
		if (unlikely(len > truesize - room)) {
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)(truesize - room));
			dev->stats.rx_length_errors++;
			goto err_skb;
		}

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += truesize;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, truesize);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;

err_xdp:
	rcu_read_unlock();
	stats->xdp_drops++;
err_skb:
	put_page(page);
	while (num_buf-- > 1) {
		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			dev->stats.rx_length_errors++;
			break;
		}
		stats->bytes += len;
		page = virt_to_head_page(buf);
		put_page(page);
	}
err_buf:
	stats->drops++;
	dev_kfree_skb(head_skb);
xdp_xmit:
	return NULL;
}

static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
				struct sk_buff *skb)
{
	enum pkt_hash_types rss_hash_type;

	if (!hdr_hash || !skb)
		return;

	switch (__le16_to_cpu(hdr_hash->hash_report)) {
	case VIRTIO_NET_HASH_REPORT_TCPv4:
	case VIRTIO_NET_HASH_REPORT_UDPv4:
	case VIRTIO_NET_HASH_REPORT_TCPv6:
	case VIRTIO_NET_HASH_REPORT_UDPv6:
	case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
	case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
		rss_hash_type = PKT_HASH_TYPE_L4;
		break;
	case VIRTIO_NET_HASH_REPORT_IPv4:
	case VIRTIO_NET_HASH_REPORT_IPv6:
	case VIRTIO_NET_HASH_REPORT_IPv6_EX:
		rss_hash_type = PKT_HASH_TYPE_L3;
		break;
	case VIRTIO_NET_HASH_REPORT_NONE:
	default:
		rss_hash_type = PKT_HASH_TYPE_NONE;
	}
	skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
}

static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
			void *buf, unsigned int len, void **ctx,
			unsigned int *xdp_xmit,
			struct virtnet_rq_stats *stats)
{
	struct net_device *dev = vi->dev;
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;

	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		virtnet_rq_free_unused_buf(rq->vq, buf);
		return;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
					stats);
	else if (vi->big_packets)
		skb = receive_big(dev, vi, rq, buf, len, stats);
	else
		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);

	if (unlikely(!skb))
		return;

	hdr = skb_vnet_hdr(skb);
	if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
		virtio_skb_set_hash((const struct virtio_net_hdr_v1_hash *)hdr, skb);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
				  virtio_is_little_endian(vi->vdev))) {
		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
				     dev->name, hdr->hdr.gso_type,
				     hdr->hdr.gso_size);
		goto frame_err;
	}

	skb_record_rx_queue(skb, vq2rxq(rq->vq));
	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

/* Unlike mergeable buffers, all buffers are allocated to the
 * same size, except for the headroom. For this reason we do
 * not need to use mergeable_len_to_ctx here - it is enough
 * to store the headroom as the context ignoring the truesize.
 */
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
			     gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	char *buf;
	unsigned int xdp_headroom = virtnet_get_headroom(vi);
	void *ctx = (void *)(unsigned long)xdp_headroom;
	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
	int err;

	len = SKB_DATA_ALIGN(len) +
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	get_page(alloc_frag->page);
	alloc_frag->offset += len;
	sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
		    vi->hdr_len + GOOD_PACKET_LEN);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));
	return err;
}

static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
			   gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);

	/* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
	for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separated rq->sg[0] for header - required in case !any_header_sg */
	sg_set_buf(&rq->sg[0], p, vi->hdr_len);

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

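/* Pick a receive buffer length for mergeable buffers: when XDP headroom
 * ("room") is in use, hand out page-sized buffers; otherwise size buffers
 * from the EWMA of recent packet lengths, clamped to
 * [min_buf_len, PAGE_SIZE - hdr_len] and cache-line aligned.
 */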
static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
					  struct ewma_pkt_len *avg_pkt_len,
					  unsigned int room)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	const size_t hdr_len = vi->hdr_len;
	unsigned int len;

	if (room)
		return PAGE_SIZE - room;

	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
				rq->min_buf_len, PAGE_SIZE - hdr_len);

	return ALIGN(len, L1_CACHE_BYTES);
}

static int add_recvbuf_mergeable(struct virtnet_info *vi,
				 struct receive_queue *rq, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	unsigned int headroom = virtnet_get_headroom(vi);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
	char *buf;
	void *ctx;
	int err;
	unsigned int len, hole;

	/* Extra tailroom is needed to satisfy XDP's assumption. This
	 * means rx frags coalescing won't work, but consider we've
	 * disabled GSO for XDP, it won't be a big issue.
	 */
	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	buf += headroom; /* advance address leaving hole at front of pkt */
	get_page(alloc_frag->page);
	alloc_frag->offset += len + room;
	hole = alloc_frag->size - alloc_frag->offset;
	if (hole < len + room) {
		/* To avoid internal fragmentation, if there is very likely not
		 * enough space for another buffer, add the remaining space to
		 * the current buffer.
		 * XDP core assumes that frame_size of xdp_buff and the length
		 * of the frag are PAGE_SIZE, so we disable the hole mechanism.
		 */
		if (!headroom)
			len += hole;
		alloc_frag->offset += hole;
	}

	sg_init_one(rq->sg, buf, len);
	ctx = mergeable_len_to_ctx(len + room, headroom);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
			  gfp_t gfp)
{
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, rq, gfp);
		else
			err = add_recvbuf_small(vi, rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
	} while (rq->vq->num_free);
	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
		unsigned long flags;

		flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
		rq->stats.kicks++;
		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
	}

	return !oom;
}

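/* Editor's note: virtqueue_kick_prepare() returns true only when the
 * device still has notifications enabled (event-index/NO_NOTIFY
 * suppression), so the kicks counter in try_fill_recv() counts actual
 * notifications sent to the host, not refill attempts.
 */
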
static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	virtqueue_napi_schedule(&rq->napi, rvq);
}

static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{
	napi_enable(napi);

	/* If all buffers were filled by the other side before we napi_enabled,
	 * we won't get another interrupt, so process any outstanding packets
	 * now. Call local_bh_enable after this to trigger softirq processing.
	 */
	local_bh_disable();
	virtqueue_napi_schedule(napi, vq);
	local_bh_enable();
}

static void virtnet_napi_tx_enable(struct virtnet_info *vi,
				   struct virtqueue *vq,
				   struct napi_struct *napi)
{
	if (!napi->weight)
		return;

	/* Tx napi touches cachelines on the cpu handling tx interrupts. Only
	 * enable the feature if this is likely affine with the transmit path.
	 */
	if (!vi->affinity_hint_set) {
		napi->weight = 0;
		return;
	}

	return virtnet_napi_enable(vq, napi);
}

static void virtnet_napi_tx_disable(struct napi_struct *napi)
{
	if (napi->weight)
		napi_disable(napi);
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
		virtnet_napi_enable(rq->vq, &rq->napi);

		/* In theory, this can happen: if we don't get any buffers in,
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

static int virtnet_receive(struct receive_queue *rq, int budget,
			   unsigned int *xdp_xmit)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct virtnet_rq_stats stats = {};
	unsigned int len;
	void *buf;
	int i;

	if (!vi->big_packets || vi->mergeable_rx_bufs) {
		void *ctx;

		while (stats.packets < budget &&
		       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
			stats.packets++;
		}
	} else {
		while (stats.packets < budget &&
		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
			stats.packets++;
		}
	}

	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
		if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
			spin_lock(&vi->refill_lock);
			if (vi->refill_enabled)
				schedule_delayed_work(&vi->refill, 0);
			spin_unlock(&vi->refill_lock);
		}
	}

	u64_stats_update_begin(&rq->stats.syncp);
	for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
		size_t offset = virtnet_rq_stats_desc[i].offset;
		u64 *item;

		item = (u64 *)((u8 *)&rq->stats + offset);
		*item += *(u64 *)((u8 *)&stats + offset);
	}
	u64_stats_update_end(&rq->stats.syncp);

	return stats.packets;
}

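/* Editor's note: virtnet_receive() accumulates counters on the stack and
 * folds them into rq->stats in a single writer-side section. A sketch of
 * the same pattern, assuming a hypothetical two-field stats struct:
 *
 *	struct { u64 packets, bytes; } tmp = {};
 *	tmp.packets++;			// hot path, no synchronization
 *	u64_stats_update_begin(&rq->stats.syncp);
 *	rq->stats.packets += tmp.packets;
 *	rq->stats.bytes += tmp.bytes;
 *	u64_stats_update_end(&rq->stats.syncp);
 */
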
static void virtnet_poll_cleantx(struct receive_queue *rq)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int index = vq2rxq(rq->vq);
	struct send_queue *sq = &vi->sq[index];
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);

	if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
		return;

	if (__netif_tx_trylock(txq)) {
		if (sq->reset) {
			__netif_tx_unlock(txq);
			return;
		}

		do {
			virtqueue_disable_cb(sq->vq);
			free_old_xmit_skbs(sq, true);
		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));

		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct send_queue *sq;
	unsigned int received;
	unsigned int xdp_xmit = 0;

	virtnet_poll_cleantx(rq);

	received = virtnet_receive(rq, budget, &xdp_xmit);

	if (xdp_xmit & VIRTIO_XDP_REDIR)
		xdp_do_flush();

	/* Out of packets? */
	if (received < budget)
		virtqueue_napi_complete(napi, rq->vq, received);

	if (xdp_xmit & VIRTIO_XDP_TX) {
		sq = virtnet_xdp_get_sq(vi);
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
			u64_stats_update_begin(&sq->stats.syncp);
			sq->stats.kicks++;
			u64_stats_update_end(&sq->stats.syncp);
		}
		virtnet_xdp_put_sq(vi, sq);
	}

	return received;
}

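/* Editor's note: the ordering in virtnet_poll() matters: xdp_do_flush()
 * runs before virtqueue_napi_complete() so frames queued by XDP_REDIRECT
 * during this poll are flushed while the NAPI context is still active.
 */
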
static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i, err;

	enable_delayed_refill(vi);

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (i < vi->curr_queue_pairs)
			/* Make sure we have some buffers: if oom use wq. */
			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);

		err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id);
		if (err < 0)
			return err;

		err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
			return err;
		}

		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
		virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
	}

	return 0;
}

static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
	struct send_queue *sq = container_of(napi, struct send_queue, napi);
	struct virtnet_info *vi = sq->vq->vdev->priv;
	unsigned int index = vq2txq(sq->vq);
	struct netdev_queue *txq;
	int opaque;
	bool done;

	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
		/* We don't need to enable cb for XDP */
		napi_complete_done(napi, 0);
		return 0;
	}

	txq = netdev_get_tx_queue(vi->dev, index);
	__netif_tx_lock(txq, raw_smp_processor_id());
	virtqueue_disable_cb(sq->vq);
	free_old_xmit_skbs(sq, true);

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);

	opaque = virtqueue_enable_cb_prepare(sq->vq);

	done = napi_complete_done(napi, 0);

	if (!done)
		virtqueue_disable_cb(sq->vq);

	__netif_tx_unlock(txq);

	if (done) {
		if (unlikely(virtqueue_poll(sq->vq, opaque))) {
			if (napi_schedule_prep(napi)) {
				__netif_tx_lock(txq, raw_smp_processor_id());
				virtqueue_disable_cb(sq->vq);
				__netif_tx_unlock(txq);
				__napi_schedule(napi);
			}
		}
	}

	return 0;
}

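/* Editor's note: the opaque/done dance in virtnet_poll_tx() closes a race:
 * a completion can land between virtqueue_enable_cb_prepare() and
 * napi_complete_done(). virtqueue_poll() re-checks the ring against the
 * opaque snapshot and reschedules NAPI if work arrived in that window.
 */
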
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	int num_sg;
	unsigned hdr_len = vi->hdr_len;
	bool can_push;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	can_push = vi->any_header_sg &&
		   !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
		   !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
	/* Even if we can, don't push here yet as this would skew
	 * csum_start offset below. */
	if (can_push)
		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
	else
		hdr = skb_vnet_hdr(skb);

	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
				    virtio_is_little_endian(vi->vdev), false,
				    0))
		return -EPROTO;

	if (vi->mergeable_rx_bufs)
		hdr->num_buffers = 0;

	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
	if (can_push) {
		__skb_push(skb, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		/* Pull header back to avoid skew in tx bytes calculations. */
		__skb_pull(skb, hdr_len);
	} else {
		sg_set_buf(sq->sg, hdr, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		num_sg++;
	}
	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}

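/* Editor's note: buffer layout for the two xmit_skb() cases, assuming a
 * virtio header of hdr_len bytes:
 *
 *   can_push:  [ hdr | skb data ... ]  header pushed into existing skb
 *              headroom, one contiguous scatterlist run;
 *   otherwise: sg[0] = hdr (from the skb control buffer),
 *              sg[1..] = skb linear data and frags.
 */
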
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
	bool kick = !netdev_xmit_more();
	bool use_napi = sq->napi.weight;

	/* Free up any pending old buffers before queueing new ones. */
	do {
		if (use_napi)
			virtqueue_disable_cb(sq->vq);

		free_old_xmit_skbs(sq, false);

	} while (use_napi && kick &&
		 unlikely(!virtqueue_enable_cb_delayed(sq->vq)));

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err)) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n",
				 qnum, err);
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Don't wait for transmitted skbs to be freed. */
	if (!use_napi) {
		skb_orphan(skb);
		nf_reset_ct(skb);
	}

	check_sq_full_and_disable(vi, dev, sq);

	if (kick || netif_xmit_stopped(txq)) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
			u64_stats_update_begin(&sq->stats.syncp);
			sq->stats.kicks++;
			u64_stats_update_end(&sq->stats.syncp);
		}
	}

	return NETDEV_TX_OK;
}

static int virtnet_rx_resize(struct virtnet_info *vi,
			     struct receive_queue *rq, u32 ring_num)
{
	bool running = netif_running(vi->dev);
	int err, qindex;

	qindex = rq - vi->rq;

	if (running)
		napi_disable(&rq->napi);

	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
	if (err)
		netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);

	if (!try_fill_recv(vi, rq, GFP_KERNEL))
		schedule_delayed_work(&vi->refill, 0);

	if (running)
		virtnet_napi_enable(rq->vq, &rq->napi);
	return err;
}

static int virtnet_tx_resize(struct virtnet_info *vi,
			     struct send_queue *sq, u32 ring_num)
{
	bool running = netif_running(vi->dev);
	struct netdev_queue *txq;
	int err, qindex;

	qindex = sq - vi->sq;

	if (running)
		virtnet_napi_tx_disable(&sq->napi);

	txq = netdev_get_tx_queue(vi->dev, qindex);

	/* 1. wait for all in-flight xmit to complete
	 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
	 */
	__netif_tx_lock_bh(txq);

	/* Prevent rx poll from accessing sq. */
	sq->reset = true;

	/* Prevent the upper layer from trying to send packets. */
	netif_stop_subqueue(vi->dev, qindex);

	__netif_tx_unlock_bh(txq);

	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
	if (err)
		netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);

	__netif_tx_lock_bh(txq);
	sq->reset = false;
	netif_tx_wake_queue(txq);
	__netif_tx_unlock_bh(txq);

	if (running)
		virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
	return err;
}

/*
 * Send command via the control virtqueue and check status. Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *out)
{
	struct scatterlist *sgs[4], hdr, stat;
	unsigned out_num = 0, tmp;
	int ret;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));

	vi->ctrl->status = ~0;
	vi->ctrl->hdr.class = class;
	vi->ctrl->hdr.cmd = cmd;
	/* Add header */
	sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
	sgs[out_num++] = &hdr;

	if (out)
		sgs[out_num++] = out;

	/* Add return status. */
	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
	sgs[out_num] = &stat;

	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
	ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
	if (ret < 0) {
		dev_warn(&vi->vdev->dev,
			 "Failed to add sgs for command vq: %d\n", ret);
		return false;
	}

	if (unlikely(!virtqueue_kick(vi->cvq)))
		return vi->ctrl->status == VIRTIO_NET_OK;

	/* Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
	       !virtqueue_is_broken(vi->cvq))
		cpu_relax();

	return vi->ctrl->status == VIRTIO_NET_OK;
}

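/* Editor's note: a minimal usage sketch (hypothetical call site). Each
 * command is one sg chain of [header][optional out data][status]; real
 * callers keep the payload inside vi->ctrl so it stays valid and DMA-safe
 * while the device processes it:
 *
 *	struct scatterlist sg;
 *
 *	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, n);
 *	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
 *	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 *				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg))
 *		return -EINVAL;
 */
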
static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;
	struct sockaddr *addr;
	struct scatterlist sg;

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
		return -EOPNOTSUPP;

	addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret)
		goto out;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		sg_init_one(&sg, addr->sa_data, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
			dev_warn(&vdev->dev,
				 "Failed to set mac address by vq command.\n");
			ret = -EINVAL;
			goto out;
		}
	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		unsigned int i;

		/* Naturally, this has an atomicity problem. */
		for (i = 0; i < dev->addr_len; i++)
			virtio_cwrite8(vdev,
				       offsetof(struct virtio_net_config, mac) +
				       i, addr->sa_data[i]);
	}

	eth_commit_mac_addr_change(dev, p);
	ret = 0;

out:
	kfree(addr);
	return ret;
}

static void virtnet_stats(struct net_device *dev,
			  struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	unsigned int start;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
		struct receive_queue *rq = &vi->rq[i];
		struct send_queue *sq = &vi->sq[i];

		do {
			start = u64_stats_fetch_begin(&sq->stats.syncp);
			tpackets = sq->stats.packets;
			tbytes = sq->stats.bytes;
			terrors = sq->stats.tx_timeouts;
		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));

		do {
			start = u64_stats_fetch_begin(&rq->stats.syncp);
			rpackets = rq->stats.packets;
			rbytes = rq->stats.bytes;
			rdrops = rq->stats.drops;
		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes += rbytes;
		tot->tx_bytes += tbytes;
		tot->rx_dropped += rdrops;
		tot->tx_errors += terrors;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;
}

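/* Editor's note: the fetch_begin/fetch_retry loops in virtnet_stats() are
 * the reader side of a seqcount: if the writer updated the counters while
 * we were reading, retry until a consistent snapshot is seen. On 64-bit
 * kernels this compiles to plain loads; it only costs anything where u64
 * reads can tear.
 */
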
static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}

static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	struct scatterlist sg;
	struct net_device *dev = vi->dev;

	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
		return 0;

	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
		dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
			 queue_pairs);
		return -EINVAL;
	} else {
		vi->curr_queue_pairs = queue_pairs;
		/* virtnet_open() will refill when device is going to up. */
		if (dev->flags & IFF_UP)
			schedule_delayed_work(&vi->refill, 0);
	}

	return 0;
}

static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	int err;

	rtnl_lock();
	err = _virtnet_set_queues(vi, queue_pairs);
	rtnl_unlock();
	return err;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	/* Make sure NAPI doesn't schedule refill work */
	disable_delayed_refill(vi);
	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);

	for (i = 0; i < vi->max_queue_pairs; i++) {
		virtnet_napi_tx_disable(&vi->sq[i].napi);
		napi_disable(&vi->rq[i].napi);
		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
	}

	return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 vi->ctrl->promisc ? "en" : "dis");

	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 vi->ctrl->allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf)
		return;

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}

static int virtnet_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
	return 0;
}

static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
	return 0;
}

static void virtnet_clean_affinity(struct virtnet_info *vi)
{
	int i;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtqueue_set_affinity(vi->rq[i].vq, NULL);
			virtqueue_set_affinity(vi->sq[i].vq, NULL);
		}

		vi->affinity_hint_set = false;
	}
}

static void virtnet_set_affinity(struct virtnet_info *vi)
{
	cpumask_var_t mask;
	int stragglers;
	int group_size;
	int i, j, cpu;
	int num_cpu;
	int stride;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		virtnet_clean_affinity(vi);
		return;
	}

	num_cpu = num_online_cpus();
	stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
	stragglers = num_cpu >= vi->curr_queue_pairs ?
			num_cpu % vi->curr_queue_pairs :
			0;
	cpu = cpumask_first(cpu_online_mask);

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		group_size = stride + (i < stragglers ? 1 : 0);

		for (j = 0; j < group_size; j++) {
			cpumask_set_cpu(cpu, mask);
			cpu = cpumask_next_wrap(cpu, cpu_online_mask,
						nr_cpu_ids, false);
		}
		virtqueue_set_affinity(vi->rq[i].vq, mask);
		virtqueue_set_affinity(vi->sq[i].vq, mask);
		__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
		cpumask_clear(mask);
	}

	vi->affinity_hint_set = true;
	free_cpumask_var(mask);
}

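/* Editor's note, worked example for virtnet_set_affinity(): with 8 online
 * CPUs and 3 queue pairs, stride = 2 and stragglers = 8 % 3 = 2, so the
 * first two queues get 3 CPUs each and the last gets 2: {0-2}, {3-5},
 * {6,7}. Each mask sets the rx/tx vq irq affinity and is mirrored into
 * the XPS map for the matching tx queue.
 */
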
static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node);
	virtnet_set_affinity(vi);
	return 0;
}

static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node_dead);
	virtnet_set_affinity(vi);
	return 0;
}

static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node);

	virtnet_clean_affinity(vi);
	return 0;
}

static enum cpuhp_state virtionet_online;

static int virtnet_cpu_notif_add(struct virtnet_info *vi)
{
	int ret;

	ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
	if (ret)
		return ret;
	ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
					       &vi->node_dead);
	if (!ret)
		return ret;
	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
	return ret;
}

static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
{
	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
	cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
					    &vi->node_dead);
}

static void virtnet_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ring->rx_max_pending = vi->rq[0].vq->num_max;
	ring->tx_max_pending = vi->sq[0].vq->num_max;
	ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
	ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
}

static int virtnet_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kernel_ring,
				 struct netlink_ext_ack *extack)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u32 rx_pending, tx_pending;
	struct receive_queue *rq;
	struct send_queue *sq;
	int i, err;

	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
	tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);

	if (ring->rx_pending == rx_pending &&
	    ring->tx_pending == tx_pending)
		return 0;

	if (ring->rx_pending > vi->rq[0].vq->num_max)
		return -EINVAL;

	if (ring->tx_pending > vi->sq[0].vq->num_max)
		return -EINVAL;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		rq = vi->rq + i;
		sq = vi->sq + i;

		if (ring->tx_pending != tx_pending) {
			err = virtnet_tx_resize(vi, sq, ring->tx_pending);
			if (err)
				return err;
		}

		if (ring->rx_pending != rx_pending) {
			err = virtnet_rx_resize(vi, rq, ring->rx_pending);
			if (err)
				return err;
		}
	}

	return 0;
}

static bool virtnet_commit_rss_command(struct virtnet_info *vi)
{
	struct net_device *dev = vi->dev;
	struct scatterlist sgs[4];
	unsigned int sg_buf_size;

	/* prepare sgs */
	sg_init_table(sgs, 4);

	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
	sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);

	sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
	sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);

	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
			- offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
	sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);

	sg_buf_size = vi->rss_key_size;
	sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				  vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
				  : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
		dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
		return false;
	}
	return true;
}

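/* Editor's note: the four sgs in virtnet_commit_rss_command() slice one
 * struct virtio_net_ctrl_rss around its variable-sized indirection table:
 *
 *   sgs[0]  fixed fields up to indirection_table
 *   sgs[1]  indirection_table[indirection_table_mask + 1] entries
 *   sgs[2]  max_tx_vq up to (but excluding) key
 *   sgs[3]  key[rss_key_size]
 */
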
static void virtnet_init_default_rss(struct virtnet_info *vi)
{
	u32 indir_val = 0;
	int i = 0;

	vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
	vi->rss_hash_types_saved = vi->rss_hash_types_supported;
	vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
						? vi->rss_indir_table_size - 1 : 0;
	vi->ctrl->rss.unclassified_queue = 0;

	for (; i < vi->rss_indir_table_size; ++i) {
		indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
		vi->ctrl->rss.indirection_table[i] = indir_val;
	}

	vi->ctrl->rss.max_tx_vq = vi->curr_queue_pairs;
	vi->ctrl->rss.hash_key_length = vi->rss_key_size;

	netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
}

static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
{
	info->data = 0;
	switch (info->flow_type) {
	case TCP_V4_FLOW:
		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
			info->data = RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
			info->data = RXH_IP_SRC | RXH_IP_DST;
		}
		break;
	case TCP_V6_FLOW:
		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
			info->data = RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
			info->data = RXH_IP_SRC | RXH_IP_DST;
		}
		break;
	case UDP_V4_FLOW:
		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
			info->data = RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
			info->data = RXH_IP_SRC | RXH_IP_DST;
		}
		break;
	case UDP_V6_FLOW:
		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
			info->data = RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
			info->data = RXH_IP_SRC | RXH_IP_DST;
		}
		break;
	case IPV4_FLOW:
		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
			info->data = RXH_IP_SRC | RXH_IP_DST;

		break;
	case IPV6_FLOW:
		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
			info->data = RXH_IP_SRC | RXH_IP_DST;

		break;
	default:
		info->data = 0;
		break;
	}
}

static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
{
	u32 new_hashtypes = vi->rss_hash_types_saved;
	bool is_disable = info->data & RXH_DISCARD;
	bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);

	/* supports only 'sd', 'sdfn' and 'r' */
	if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
		return false;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
		if (!is_disable)
			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
		break;
	case UDP_V4_FLOW:
		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
		if (!is_disable)
			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
		break;
	case IPV4_FLOW:
		new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
		if (!is_disable)
			new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
		break;
	case TCP_V6_FLOW:
		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
		if (!is_disable)
			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
		break;
	case UDP_V6_FLOW:
		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
		if (!is_disable)
			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
		break;
	case IPV6_FLOW:
		new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
		if (!is_disable)
			new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
		break;
	default:
		/* unsupported flow */
		return false;
	}

	/* if unsupported hashtype was set */
	if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
		return false;

	if (new_hashtypes != vi->rss_hash_types_saved) {
		vi->rss_hash_types_saved = new_hashtypes;
		vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
		if (vi->dev->features & NETIF_F_RXHASH)
			return virtnet_commit_rss_command(vi);
	}

	return true;
}

static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
	strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}

/* TODO: Eliminate OOO packets during switching */
static int virtnet_set_channels(struct net_device *dev,
				struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u16 queue_pairs = channels->combined_count;
	int err;

	/* We don't support separate rx/tx channels.
	 * We don't allow setting 'other' channels.
	 */
	if (channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
		return -EINVAL;

	/* For now we don't support modifying channels while XDP is loaded
	 * also when XDP is loaded all RX queues have XDP programs so we only
	 * need to check a single RX queue.
	 */
	if (vi->rq[0].xdp_prog)
		return -EINVAL;

	cpus_read_lock();
	err = _virtnet_set_queues(vi, queue_pairs);
	if (err) {
		cpus_read_unlock();
		goto err;
	}
	virtnet_set_affinity(vi);
	cpus_read_unlock();

	netif_set_real_num_tx_queues(dev, queue_pairs);
	netif_set_real_num_rx_queues(dev, queue_pairs);
err:
	return err;
}

static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	unsigned int i, j;
	u8 *p = data;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < vi->curr_queue_pairs; i++) {
			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
				ethtool_sprintf(&p, "rx_queue_%u_%s", i,
						virtnet_rq_stats_desc[j].desc);
		}

		for (i = 0; i < vi->curr_queue_pairs; i++) {
			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
				ethtool_sprintf(&p, "tx_queue_%u_%s", i,
						virtnet_sq_stats_desc[j].desc);
		}
		break;
	}
}

static int virtnet_get_sset_count(struct net_device *dev, int sset)
{
	struct virtnet_info *vi = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
					       VIRTNET_SQ_STATS_LEN);
	default:
		return -EOPNOTSUPP;
	}
}

static void virtnet_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	unsigned int idx = 0, start, i, j;
	const u8 *stats_base;
	size_t offset;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		stats_base = (u8 *)&rq->stats;
		do {
			start = u64_stats_fetch_begin(&rq->stats.syncp);
			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
				offset = virtnet_rq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
		idx += VIRTNET_RQ_STATS_LEN;
	}

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct send_queue *sq = &vi->sq[i];

		stats_base = (u8 *)&sq->stats;
		do {
			start = u64_stats_fetch_begin(&sq->stats.syncp);
			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
				offset = virtnet_sq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
		idx += VIRTNET_SQ_STATS_LEN;
	}
}

static void virtnet_get_channels(struct net_device *dev,
				 struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);

	channels->combined_count = vi->curr_queue_pairs;
	channels->max_combined = vi->max_queue_pairs;
	channels->max_other = 0;
	channels->rx_count = 0;
	channels->tx_count = 0;
	channels->other_count = 0;
}

static int virtnet_set_link_ksettings(struct net_device *dev,
				      const struct ethtool_link_ksettings *cmd)
{
	struct virtnet_info *vi = netdev_priv(dev);

	return ethtool_virtdev_set_link_ksettings(dev, cmd,
						  &vi->speed, &vi->duplex);
}

static int virtnet_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	struct virtnet_info *vi = netdev_priv(dev);

	cmd->base.speed = vi->speed;
	cmd->base.duplex = vi->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
				       struct ethtool_coalesce *ec)
{
	struct scatterlist sgs_tx, sgs_rx;
	struct virtio_net_ctrl_coal_tx coal_tx;
	struct virtio_net_ctrl_coal_rx coal_rx;

	coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
	coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
	sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
				  VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
				  &sgs_tx))
		return -EINVAL;

	/* Save parameters */
	vi->tx_usecs = ec->tx_coalesce_usecs;
	vi->tx_max_packets = ec->tx_max_coalesced_frames;

	coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
	coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
	sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
				  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
				  &sgs_rx))
		return -EINVAL;

	/* Save parameters */
	vi->rx_usecs = ec->rx_coalesce_usecs;
	vi->rx_max_packets = ec->rx_max_coalesced_frames;

	return 0;
}

static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
{
	/* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
	 * feature is negotiated.
	 */
	if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
		return -EOPNOTSUPP;

	if (ec->tx_max_coalesced_frames > 1 ||
	    ec->rx_max_coalesced_frames != 1)
		return -EINVAL;

	return 0;
}

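/* Editor's note: without VIRTIO_NET_F_NOTF_COAL the only coalescing knob
 * that exists is tx_max_coalesced_frames acting as a tx NAPI on/off
 * switch (see virtnet_set_coalesce() below): 0 disables tx NAPI, 1
 * enables it; rx stays fixed at one frame per interrupt.
 */
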
static int virtnet_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int ret, i, napi_weight;
	bool update_napi = false;

	/* Can't change NAPI weight if the link is up */
	napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
	if (napi_weight ^ vi->sq[0].napi.weight) {
		if (dev->flags & IFF_UP)
			return -EBUSY;
		else
			update_napi = true;
	}

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
		ret = virtnet_send_notf_coal_cmds(vi, ec);
	else
		ret = virtnet_coal_params_supported(ec);

	if (ret)
		return ret;

	if (update_napi) {
		for (i = 0; i < vi->max_queue_pairs; i++)
			vi->sq[i].napi.weight = napi_weight;
	}

	return ret;
}

static int virtnet_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct virtnet_info *vi = netdev_priv(dev);

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
		ec->rx_coalesce_usecs = vi->rx_usecs;
		ec->tx_coalesce_usecs = vi->tx_usecs;
		ec->tx_max_coalesced_frames = vi->tx_max_packets;
		ec->rx_max_coalesced_frames = vi->rx_max_packets;
	} else {
		ec->rx_max_coalesced_frames = 1;

		if (vi->sq[0].napi.weight)
			ec->tx_max_coalesced_frames = 1;
	}

	return 0;
}

static void virtnet_init_settings(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	vi->speed = SPEED_UNKNOWN;
	vi->duplex = DUPLEX_UNKNOWN;
}

static void virtnet_update_settings(struct virtnet_info *vi)
{
	u32 speed;
	u8 duplex;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
		return;

	virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);

	if (ethtool_validate_speed(speed))
		vi->speed = speed;

	virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);

	if (ethtool_validate_duplex(duplex))
		vi->duplex = duplex;
}

static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
{
	return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
}

static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
{
	return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
}

static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	if (indir) {
		for (i = 0; i < vi->rss_indir_table_size; ++i)
			indir[i] = vi->ctrl->rss.indirection_table[i];
	}

	if (key)
		memcpy(key, vi->ctrl->rss.key, vi->rss_key_size);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	return 0;
}

static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (indir) {
		for (i = 0; i < vi->rss_indir_table_size; ++i)
			vi->ctrl->rss.indirection_table[i] = indir[i];
	}
	if (key)
		memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);

	virtnet_commit_rss_command(vi);

	return 0;
}

static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int rc = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = vi->curr_queue_pairs;
		break;
	case ETHTOOL_GRXFH:
		virtnet_get_hashflow(vi, info);
		break;
	default:
		rc = -EOPNOTSUPP;
	}

	return rc;
}

static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int rc = 0;

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		if (!virtnet_set_hashflow(vi, info))
			rc = -EINVAL;

		break;
	default:
		rc = -EOPNOTSUPP;
	}

	return rc;
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
		ETHTOOL_COALESCE_USECS,
	.get_drvinfo = virtnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
	.set_ringparam = virtnet_set_ringparam,
	.get_strings = virtnet_get_strings,
	.get_sset_count = virtnet_get_sset_count,
	.get_ethtool_stats = virtnet_get_ethtool_stats,
	.set_channels = virtnet_set_channels,
	.get_channels = virtnet_get_channels,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = virtnet_get_link_ksettings,
	.set_link_ksettings = virtnet_set_link_ksettings,
	.set_coalesce = virtnet_set_coalesce,
	.get_coalesce = virtnet_get_coalesce,
	.get_rxfh_key_size = virtnet_get_rxfh_key_size,
	.get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
	.get_rxfh = virtnet_get_rxfh,
	.set_rxfh = virtnet_set_rxfh,
	.get_rxnfc = virtnet_get_rxnfc,
	.set_rxnfc = virtnet_set_rxnfc,
};

static void virtnet_freeze_down(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	/* Make sure no work handler is accessing the device */
	flush_work(&vi->config_work);

	netif_tx_lock_bh(vi->dev);
	netif_device_detach(vi->dev);
	netif_tx_unlock_bh(vi->dev);
	if (netif_running(vi->dev))
		virtnet_close(vi->dev);
}

static int init_vqs(struct virtnet_info *vi);

static int virtnet_restore_up(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err;

	err = init_vqs(vi);
	if (err)
		return err;

	virtio_device_ready(vdev);

	enable_delayed_refill(vi);

	if (netif_running(vi->dev)) {
		err = virtnet_open(vi->dev);
		if (err)
			return err;
	}

	netif_tx_lock_bh(vi->dev);
	netif_device_attach(vi->dev);
	netif_tx_unlock_bh(vi->dev);
	return err;
}

static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
{
	struct scatterlist sg;

	vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);

	sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
		dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
		return -EINVAL;
	}

	return 0;
}

static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
{
	u64 offloads = 0;

	if (!vi->guest_offloads)
		return 0;

	return virtnet_set_guest_offloads(vi, offloads);
}

static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
{
	u64 offloads = vi->guest_offloads;

	if (!vi->guest_offloads)
		return 0;

	return virtnet_set_guest_offloads(vi, offloads);
}

static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			   struct netlink_ext_ack *extack)
{
	unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
					   sizeof(struct skb_shared_info));
	unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
	struct virtnet_info *vi = netdev_priv(dev);
	struct bpf_prog *old_prog;
	u16 xdp_qp = 0, curr_qp;
	int i, err;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
	    && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) ||
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) ||
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) {
		NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
		return -EOPNOTSUPP;
	}

	if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
		NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
		return -EINVAL;
	}

	if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) {
		NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags");
		netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz);
		return -EINVAL;
	}

	curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
	if (prog)
		xdp_qp = nr_cpu_ids;

	/* XDP requires extra queues for XDP_TX */
	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
		netdev_warn_once(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
				 curr_qp + xdp_qp, vi->max_queue_pairs);
		xdp_qp = 0;
	}

	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
	if (!prog && !old_prog)
		return 0;

	if (prog)
		bpf_prog_add(prog, vi->max_queue_pairs - 1);

	/* Make sure NAPI is not using any XDP TX queues for RX. */
	if (netif_running(dev)) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			napi_disable(&vi->rq[i].napi);
			virtnet_napi_tx_disable(&vi->sq[i].napi);
		}
	}

	if (!prog) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
			if (i == 0)
				virtnet_restore_guest_offloads(vi);
		}
		synchronize_net();
	}

	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
	if (err)
		goto err;
	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
	vi->xdp_queue_pairs = xdp_qp;

	if (prog) {
		vi->xdp_enabled = true;
		for (i = 0; i < vi->max_queue_pairs; i++) {
			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
			if (i == 0 && !old_prog)
				virtnet_clear_guest_offloads(vi);
		}
		if (!old_prog)
			xdp_features_set_redirect_target(dev, true);
	} else {
		xdp_features_clear_redirect_target(dev);
		vi->xdp_enabled = false;
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (old_prog)
			bpf_prog_put(old_prog);
		if (netif_running(dev)) {
			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
					       &vi->sq[i].napi);
		}
	}

	return 0;

err:
	if (!prog) {
		virtnet_clear_guest_offloads(vi);
		for (i = 0; i < vi->max_queue_pairs; i++)
			rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
	}

	if (netif_running(dev)) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
					       &vi->sq[i].napi);
		}
	}
	if (prog)
		bpf_prog_sub(prog, vi->max_queue_pairs - 1);
	return err;
}

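/* Editor's note: queue accounting example for virtnet_xdp_set(), assuming
 * a 4-CPU guest with max_queue_pairs = 8 and curr_queue_pairs = 4:
 * attaching a program requests xdp_qp = nr_cpu_ids = 4 extra pairs for
 * XDP_TX, i.e. 8 pairs in use. Had that exceeded max_queue_pairs, xdp_qp
 * would fall back to 0 and XDP_TX/XDP_REDIRECT would share the regular tx
 * queues under a lock instead.
 */
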
static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
				      size_t len)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int ret;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
		return -EOPNOTSUPP;

	ret = snprintf(buf, len, "sby");
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

static int virtnet_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u64 offloads;
	int err;

	if ((dev->features ^ features) & NETIF_F_GRO_HW) {
		if (vi->xdp_enabled)
			return -EBUSY;

		if (features & NETIF_F_GRO_HW)
			offloads = vi->guest_offloads_capable;
		else
			offloads = vi->guest_offloads_capable &
				   ~GUEST_OFFLOAD_GRO_HW_MASK;

		err = virtnet_set_guest_offloads(vi, offloads);
		if (err)
			return err;
		vi->guest_offloads = offloads;
	}

	if ((dev->features ^ features) & NETIF_F_RXHASH) {
		if (features & NETIF_F_RXHASH)
			vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
		else
			vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;

		if (!virtnet_commit_rss_command(vi))
			return -EINVAL;
	}

	return 0;
}

a520794b
TL
3410static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
3411{
3412 struct virtnet_info *priv = netdev_priv(dev);
3413 struct send_queue *sq = &priv->sq[txqueue];
3414 struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
3415
3416 u64_stats_update_begin(&sq->stats.syncp);
3417 sq->stats.tx_timeouts++;
3418 u64_stats_update_end(&sq->stats.syncp);
3419
3420 netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
3421 txqueue, sq->name, sq->vq->index, sq->vq->name,
5337824f 3422 jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
a520794b
TL
3423}
3424
76288b4e
SH
3425static const struct net_device_ops virtnet_netdev = {
3426 .ndo_open = virtnet_open,
3427 .ndo_stop = virtnet_close,
3428 .ndo_start_xmit = start_xmit,
3429 .ndo_validate_addr = eth_validate_addr,
9c46f6d4 3430 .ndo_set_mac_address = virtnet_set_mac_address,
2af7698e 3431 .ndo_set_rx_mode = virtnet_set_rx_mode,
3fa2a1df 3432 .ndo_get_stats64 = virtnet_stats,
1824a989
AW
3433 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
3434 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
f4e63525 3435 .ndo_bpf = virtnet_xdp,
186b3c99 3436 .ndo_xdp_xmit = virtnet_xdp_xmit,
2836b4f2 3437 .ndo_features_check = passthru_features_check,
ba5e4426 3438 .ndo_get_phys_port_name = virtnet_get_phys_port_name,
a02e8964 3439 .ndo_set_features = virtnet_set_features,
a520794b 3440 .ndo_tx_timeout = virtnet_tx_timeout,
76288b4e
SH
3441};
3442
586d17c5 3443static void virtnet_config_changed_work(struct work_struct *work)
9f4d26d0 3444{
586d17c5
JW
3445 struct virtnet_info *vi =
3446 container_of(work, struct virtnet_info, config_work);
9f4d26d0
MM
3447 u16 v;
3448
855e0c52
RR
3449 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
3450 struct virtio_net_config, status, &v) < 0)
507613bf 3451 return;
586d17c5
JW
3452
3453 if (v & VIRTIO_NET_S_ANNOUNCE) {
ee89bab1 3454 netdev_notify_peers(vi->dev);
586d17c5
JW
3455 virtnet_ack_link_announce(vi);
3456 }
9f4d26d0
MM
3457
3458 /* Ignore unknown (future) status bits */
3459 v &= VIRTIO_NET_S_LINK_UP;
3460
3461 if (vi->status == v)
507613bf 3462 return;
9f4d26d0
MM
3463
3464 vi->status = v;
3465
3466 if (vi->status & VIRTIO_NET_S_LINK_UP) {
faa9b39f 3467 virtnet_update_settings(vi);
9f4d26d0 3468 netif_carrier_on(vi->dev);
986a4f4d 3469 netif_tx_wake_all_queues(vi->dev);
9f4d26d0
MM
3470 } else {
3471 netif_carrier_off(vi->dev);
986a4f4d 3472 netif_tx_stop_all_queues(vi->dev);
9f4d26d0
MM
3473 }
3474}
3475
3476static void virtnet_config_changed(struct virtio_device *vdev)
3477{
3478 struct virtnet_info *vi = vdev->priv;
3479
3b07e9ca 3480 schedule_work(&vi->config_work);
9f4d26d0
MM
3481}
3482
986a4f4d
JW
3483static void virtnet_free_queues(struct virtnet_info *vi)
3484{
d4fb84ee
AV
3485 int i;
3486
ab3971b1 3487 for (i = 0; i < vi->max_queue_pairs; i++) {
5198d545
JK
3488 __netif_napi_del(&vi->rq[i].napi);
3489 __netif_napi_del(&vi->sq[i].napi);
ab3971b1 3490 }
d4fb84ee 3491
5198d545 3492 /* We called __netif_napi_del(),
963abe5c
ED
3493 * we need to respect an RCU grace period before freeing vi->rq
3494 */
3495 synchronize_net();
3496
986a4f4d
JW
3497 kfree(vi->rq);
3498 kfree(vi->sq);
12e57169 3499 kfree(vi->ctrl);
986a4f4d
JW
3500}
3501
47315329 3502static void _free_receive_bufs(struct virtnet_info *vi)
986a4f4d 3503{
f600b690 3504 struct bpf_prog *old_prog;
986a4f4d
JW
3505 int i;
3506
3507 for (i = 0; i < vi->max_queue_pairs; i++) {
3508 while (vi->rq[i].pages)
3509 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
f600b690
JF
3510
3511 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
3512 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
3513 if (old_prog)
3514 bpf_prog_put(old_prog);
986a4f4d 3515 }
47315329
JF
3516}
3517
3518static void free_receive_bufs(struct virtnet_info *vi)
3519{
3520 rtnl_lock();
3521 _free_receive_bufs(vi);
f600b690 3522 rtnl_unlock();
986a4f4d
JW
3523}
3524
fb51879d
MD
3525static void free_receive_page_frags(struct virtnet_info *vi)
3526{
3527 int i;
3528 for (i = 0; i < vi->max_queue_pairs; i++)
3529 if (vi->rq[i].alloc_frag.page)
3530 put_page(vi->rq[i].alloc_frag.page);
3531}
3532
6e345f8c
XZ
3533static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
3534{
3535 if (!is_xdp_frame(buf))
3536 dev_kfree_skb(buf);
3537 else
3538 xdp_return_frame(ptr_to_xdp(buf));
3539}
3540
3541static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
3542{
3543 struct virtnet_info *vi = vq->vdev->priv;
3544 int i = vq2rxq(vq);
3545
3546 if (vi->mergeable_rx_bufs)
3547 put_page(virt_to_head_page(buf));
3548 else if (vi->big_packets)
3549 give_pages(&vi->rq[i], buf);
3550 else
3551 put_page(virt_to_head_page(buf));
3552}
3553
986a4f4d
JW
3554static void free_unused_bufs(struct virtnet_info *vi)
3555{
3556 void *buf;
3557 int i;
3558
3559 for (i = 0; i < vi->max_queue_pairs; i++) {
3560 struct virtqueue *vq = vi->sq[i].vq;
6e345f8c
XZ
3561 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
3562 virtnet_sq_free_unused_buf(vq, buf);
f8bb5104 3563 cond_resched();
986a4f4d
JW
3564 }
3565
3566 for (i = 0; i < vi->max_queue_pairs; i++) {
3567 struct virtqueue *vq = vi->rq[i].vq;
6e345f8c
XZ
3568 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
3569 virtnet_rq_free_unused_buf(vq, buf);
f8bb5104 3570 cond_resched();
986a4f4d
JW
3571 }
3572}
3573
e9d7417b
JW
3574static void virtnet_del_vqs(struct virtnet_info *vi)
3575{
3576 struct virtio_device *vdev = vi->vdev;
3577
310974fa 3578 virtnet_clean_affinity(vi);
986a4f4d 3579
e9d7417b 3580 vdev->config->del_vqs(vdev);
986a4f4d
JW
3581
3582 virtnet_free_queues(vi);
e9d7417b
JW
3583}
3584
d85b758f
MT
3585/* How large should a single buffer be so a queue full of these can fit at
3586 * least one full packet?
3587 * Logic below assumes the mergeable buffer header is used.
3588 */
3589static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
3590{
c1ddc42d 3591 const unsigned int hdr_len = vi->hdr_len;
d85b758f
MT
3592 unsigned int rq_size = virtqueue_get_vring_size(vq);
3593 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
3594 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
3595 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
3596
f0c3192c
MT
3597 return max(max(min_buf_len, hdr_len) - hdr_len,
3598 (unsigned int)GOOD_PACKET_LEN);
d85b758f
MT
3599}
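
/* Editor's worked example (illustrative numbers, not device-provided):
 * with hdr_len = 12 (struct virtio_net_hdr_mrg_rxbuf), a 256-entry ring,
 * big_packets off and max_mtu = 1500:
 *
 *   packet_len  = 1500
 *   buf_len     = 12 + 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 1500 = 1530
 *   min_buf_len = DIV_ROUND_UP(1530, 256) = 6
 *   result      = max(max(6, 12) - 12, GOOD_PACKET_LEN) = GOOD_PACKET_LEN
 *
 * so for ordinary MTUs the GOOD_PACKET_LEN floor dominates. The
 * DIV_ROUND_UP term only wins with big_packets (packet_len = IP_MAX_MTU
 * = 65535) and a small ring, e.g. 16 entries:
 * DIV_ROUND_UP(12 + 14 + 4 + 65535, 16) - 12 = 4098 - 12 = 4086.
 */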

static int virtnet_find_vqs(struct virtnet_info *vi)
{
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	const char **names;
	bool *ctx;

	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
	 * possible control vq.
	 */
	total_vqs = vi->max_queue_pairs * 2 +
		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);

	/* Allocate space for find_vqs parameters */
	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
	if (!callbacks)
		goto err_callback;
	names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
	if (!names)
		goto err_names;
	if (!vi->big_packets || vi->mergeable_rx_bufs) {
		ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			goto err_ctx;
	} else {
		ctx = NULL;
	}

	/* Parameters for control virtqueue, if any */
	if (vi->has_cvq) {
		callbacks[total_vqs - 1] = NULL;
		names[total_vqs - 1] = "control";
	}

	/* Allocate/initialize parameters for send/receive virtqueues */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		callbacks[rxq2vq(i)] = skb_recv_done;
		callbacks[txq2vq(i)] = skb_xmit_done;
		sprintf(vi->rq[i].name, "input.%d", i);
		sprintf(vi->sq[i].name, "output.%d", i);
		names[rxq2vq(i)] = vi->rq[i].name;
		names[txq2vq(i)] = vi->sq[i].name;
		if (ctx)
			ctx[rxq2vq(i)] = true;
	}

	ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
				  names, ctx, NULL);
	if (ret)
		goto err_find;

	if (vi->has_cvq) {
		vi->cvq = vqs[total_vqs - 1];
		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].vq = vqs[rxq2vq(i)];
		vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
		vi->sq[i].vq = vqs[txq2vq(i)];
	}

	/* run here: ret == 0; fall through to err_find to free the
	 * temporary parameter arrays.
	 */

err_find:
	kfree(ctx);
err_ctx:
	kfree(names);
err_names:
	kfree(callbacks);
err_callback:
	kfree(vqs);
err_vq:
	return ret;
}
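
/* Editor's note: given the layout comment at the top of this function and
 * the pairwise rxq2vq()/txq2vq() indexing, the resulting virtqueue order
 * for, say, max_queue_pairs = 2 with a control vq (total_vqs = 5) is:
 *
 *   vqs[0] = rq[0].vq  ("input.0")
 *   vqs[1] = sq[0].vq  ("output.0")
 *   vqs[2] = rq[1].vq  ("input.1")
 *   vqs[3] = sq[1].vq  ("output.1")
 *   vqs[4] = cvq       ("control", always last when present)
 */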

static int virtnet_alloc_queues(struct virtnet_info *vi)
{
	int i;

	if (vi->has_cvq) {
		vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
		if (!vi->ctrl)
			goto err_ctrl;
	} else {
		vi->ctrl = NULL;
	}
	vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
	if (!vi->sq)
		goto err_sq;
	vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
	if (!vi->rq)
		goto err_rq;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].pages = NULL;
		netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
				      napi_weight);
		netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
					 virtnet_poll_tx,
					 napi_tx ? napi_weight : 0);

		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));

		u64_stats_init(&vi->rq[i].stats.syncp);
		u64_stats_init(&vi->sq[i].stats.syncp);
	}

	return 0;

err_rq:
	kfree(vi->sq);
err_sq:
	kfree(vi->ctrl);
err_ctrl:
	return -ENOMEM;
}

static int init_vqs(struct virtnet_info *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtnet_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtnet_find_vqs(vi);
	if (ret)
		goto err_free;

	cpus_read_lock();
	virtnet_set_affinity(vi);
	cpus_read_unlock();

	return 0;

err_free:
	virtnet_free_queues(vi);
err:
	return ret;
}

#ifdef CONFIG_SYSFS
static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
					     char *buf)
{
	struct virtnet_info *vi = netdev_priv(queue->dev);
	unsigned int queue_index = get_netdev_rx_queue_index(queue);
	unsigned int headroom = virtnet_get_headroom(vi);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	struct ewma_pkt_len *avg;

	BUG_ON(queue_index >= vi->max_queue_pairs);
	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
	return sprintf(buf, "%u\n",
		       get_mergeable_buf_len(&vi->rq[queue_index], avg,
					     SKB_DATA_ALIGN(headroom + tailroom)));
}

static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
	__ATTR_RO(mergeable_rx_buffer_size);

static struct attribute *virtio_net_mrg_rx_attrs[] = {
	&mergeable_rx_buffer_size_attribute.attr,
	NULL
};

static const struct attribute_group virtio_net_mrg_rx_group = {
	.name = "virtio_net",
	.attrs = virtio_net_mrg_rx_attrs
};
#endif
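
/* Editor's sketch (userspace; interface and queue number illustrative):
 * the attribute group above appears once per RX queue when mergeable
 * buffers are in use, so the EWMA-derived buffer size can be read with:
 */
#if 0 /* standalone userspace example, not driver code */
#include <stdio.h>

int main(void)
{
	char buf[32] = "";
	FILE *f = fopen("/sys/class/net/eth0/queues/rx-0/virtio_net/mergeable_rx_buffer_size",
			"r");

	if (!f) /* only present with mergeable RX buffers */
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("mergeable_rx_buffer_size: %s", buf);
	fclose(f);
	return 0;
}
#endif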

static bool virtnet_fail_on_feature(struct virtio_device *vdev,
				    unsigned int fbit,
				    const char *fname, const char *dname)
{
	if (!virtio_has_feature(vdev, fbit))
		return false;

	dev_err(&vdev->dev, "device advertises feature %s but not %s",
		fname, dname);

	return true;
}

#define VIRTNET_FAIL_ON(vdev, fbit, dbit)			\
	virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)

static bool virtnet_validate_features(struct virtio_device *vdev)
{
	if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
	    (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
			     "VIRTIO_NET_F_CTRL_VQ"))) {
		return false;
	}

	return true;
}
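
/* Editor's note: because of the stringification in VIRTNET_FAIL_ON, e.g.
 *
 *   VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, "VIRTIO_NET_F_CTRL_VQ")
 *
 * expands to
 *
 *   virtnet_fail_on_feature(vdev, VIRTIO_NET_F_CTRL_RX,
 *                           "VIRTIO_NET_F_CTRL_RX", "VIRTIO_NET_F_CTRL_VQ")
 *
 * so a broken device that offers CTRL_RX without CTRL_VQ logs
 * "device advertises feature VIRTIO_NET_F_CTRL_RX but not
 * VIRTIO_NET_F_CTRL_VQ" and validation as a whole fails.
 */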

#define MIN_MTU ETH_MIN_MTU
#define MAX_MTU ETH_MAX_MTU

static int virtnet_validate(struct virtio_device *vdev)
{
	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	if (!virtnet_validate_features(vdev))
		return -EINVAL;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		int mtu = virtio_cread16(vdev,
					 offsetof(struct virtio_net_config,
						  mtu));
		if (mtu < MIN_MTU)
			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
	}

	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
	    !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
		__virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
	}

	return 0;
}

static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
{
	return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	       virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	       virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
	       virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
	       (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) &&
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6));
}

static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
{
	bool guest_gso = virtnet_check_guest_gso(vi);

	/* If the device can receive ANY guest GSO packets, regardless of
	 * mtu, allocate buffers for packets of maximum size; otherwise
	 * limit them to mtu-sized packets only.
	 */
	if (mtu > ETH_DATA_LEN || guest_gso) {
		vi->big_packets = true;
		vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
	}
}
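
/* Editor's worked example (illustrative, assumes 4 KiB pages): with no
 * guest GSO feature negotiated and mtu = 9000 read from config space,
 * 9000 > ETH_DATA_LEN (1500), so:
 *
 *   vi->big_packets = true;
 *   vi->big_packets_num_skbfrags = DIV_ROUND_UP(9000, 4096) = 3;
 *
 * With any guest GSO feature the limit is MAX_SKB_FRAGS instead, since a
 * GSO packet may be up to 64 KiB regardless of mtu.
 */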

static int virtnet_probe(struct virtio_device *vdev)
{
	int i, err = -ENOMEM;
	struct net_device *dev;
	struct virtnet_info *vi;
	u16 max_queue_pairs;
	int mtu = 0;

	/* Find if host supports multiqueue/rss virtio_net device */
	max_queue_pairs = 1;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
		max_queue_pairs =
			virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));

	/* We need at least 2 queues */
	if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
			   IFF_TX_SKB_NO_LINEAR;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	dev->ethtool_ops = &virtnet_ethtool_ops;
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
			dev->hw_features |= NETIF_F_GSO_UDP_L4;

		dev->features |= NETIF_F_GSO_ROBUST;

		if (gso)
			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
		/* (!csum && gso) case will be fixed by register_netdev() */
	}
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
		dev->features |= NETIF_F_RXCSUM;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
		dev->features |= NETIF_F_GRO_HW;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
		dev->hw_features |= NETIF_F_GRO_HW;

	dev->vlan_features = dev->features;
	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;

	/* MTU range: 68 - 65535 */
	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU;

	/* Configuration may specify what MAC to use. Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		u8 addr[ETH_ALEN];

		virtio_cread_bytes(vdev,
				   offsetof(struct virtio_net_config, mac),
				   addr, ETH_ALEN);
		eth_hw_addr_set(dev, addr);
	} else {
		eth_hw_addr_random(dev);
		dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
			 dev->dev_addr);
	}

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;

	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
	spin_lock_init(&vi->refill_lock);

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
		vi->mergeable_rx_bufs = true;
		dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
	}

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
		vi->rx_usecs = 0;
		vi->tx_usecs = 0;
		vi->tx_max_packets = 0;
		vi->rx_max_packets = 0;
	}

	if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
		vi->has_rss_hash_report = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
		vi->has_rss = true;

	if (vi->has_rss || vi->has_rss_hash_report) {
		vi->rss_indir_table_size =
			virtio_cread16(vdev, offsetof(struct virtio_net_config,
						      rss_max_indirection_table_length));
		vi->rss_key_size =
			virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));

		vi->rss_hash_types_supported =
			virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
		vi->rss_hash_types_supported &=
			~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
			  VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
			  VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);

		dev->hw_features |= NETIF_F_RXHASH;
	}

	if (vi->has_rss_hash_report)
		vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
	else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
		 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		vi->hdr_len = sizeof(struct virtio_net_hdr);

	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->any_header_sg = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		mtu = virtio_cread16(vdev,
				     offsetof(struct virtio_net_config,
					      mtu));
		if (mtu < dev->min_mtu) {
			/* Should never trigger: MTU was previously validated
			 * in virtnet_validate.
			 */
			dev_err(&vdev->dev,
				"device MTU appears to have changed it is now %d < %d",
				mtu, dev->min_mtu);
			err = -EINVAL;
			goto free;
		}

		dev->mtu = mtu;
		dev->max_mtu = mtu;
	}

	virtnet_set_big_packets(vi, mtu);

	if (vi->any_header_sg)
		dev->needed_headroom = vi->hdr_len;

	/* Enable multiqueue by default */
	if (num_online_cpus() >= max_queue_pairs)
		vi->curr_queue_pairs = max_queue_pairs;
	else
		vi->curr_queue_pairs = num_online_cpus();
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free;

#ifdef CONFIG_SYSFS
	if (vi->mergeable_rx_bufs)
		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
#endif
	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);

	virtnet_init_settings(dev);

	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
		vi->failover = net_failover_create(vi->dev);
		if (IS_ERR(vi->failover)) {
			err = PTR_ERR(vi->failover);
			goto free_vqs;
		}
	}

	if (vi->has_rss || vi->has_rss_hash_report)
		virtnet_init_default_rss(vi);

	/* serialize netdev register + virtio_device_ready() with ndo_open() */
	rtnl_lock();

	err = register_netdevice(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		rtnl_unlock();
		goto free_failover;
	}

	virtio_device_ready(vdev);

	/* A random MAC address has been assigned, notify the device.
	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
	 * because many devices work fine without getting MAC explicitly
	 */
	if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		struct scatterlist sg;

		sg_init_one(&sg, dev->dev_addr, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
			pr_debug("virtio_net: setting MAC address failed\n");
			rtnl_unlock();
			err = -EINVAL;
			goto free_unregister_netdev;
		}
	}

	rtnl_unlock();

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_unregister_netdev;
	}

	virtnet_set_queues(vi, vi->curr_queue_pairs);

	/* Assume link up if device can't report link status,
	   otherwise get link status from config. */
	netif_carrier_off(dev);
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		virtnet_update_settings(vi);
		netif_carrier_on(dev);
	}

	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
		if (virtio_has_feature(vi->vdev, guest_offloads[i]))
			set_bit(guest_offloads[i], &vi->guest_offloads);
	vi->guest_offloads_capable = vi->guest_offloads;

	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
		 dev->name, max_queue_pairs);

	return 0;

free_unregister_netdev:
	unregister_netdev(dev);
free_failover:
	net_failover_destroy(vi->failover);
free_vqs:
	virtio_reset_device(vdev);
	cancel_delayed_work_sync(&vi->refill);
	free_receive_page_frags(vi);
	virtnet_del_vqs(vi);
free:
	free_netdev(dev);
	return err;
}
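
/* Editor's sketch (userspace; device instance name illustrative): the
 * feature bits negotiated during the probe above can be inspected through
 * the virtio bus in sysfs. The "features" attribute prints one character
 * per feature bit, lowest bit first; "virtio0" is whichever instance
 * backs the NIC on a given system.
 */
#if 0 /* standalone userspace example, not driver code */
#include <stdio.h>

int main(void)
{
	char bits[128] = "";
	FILE *f = fopen("/sys/bus/virtio/devices/virtio0/features", "r");

	if (!f)
		return 1;
	if (fgets(bits, sizeof(bits), f))
		printf("negotiated feature bits: %s\n", bits);
	fclose(f);
	return 0;
}
#endif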

static void remove_vq_common(struct virtnet_info *vi)
{
	virtio_reset_device(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	free_receive_page_frags(vi);

	virtnet_del_vqs(vi);
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vi->config_work);

	unregister_netdev(vi->dev);

	net_failover_destroy(vi->failover);

	remove_vq_common(vi);

	free_netdev(vi->dev);
}

static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);
	virtnet_freeze_down(vdev);
	remove_vq_common(vi);

	return 0;
}

static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err;

	err = virtnet_restore_up(vdev);
	if (err)
		return err;
	virtnet_set_queues(vi, vi->curr_queue_pairs);

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		virtnet_freeze_down(vdev);
		remove_vq_common(vi);
		return err;
	}

	return 0;
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

#define VIRTNET_FEATURES \
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
	VIRTIO_NET_F_MAC, \
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
	VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
	VIRTIO_NET_F_CTRL_MAC_ADDR, \
	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
	VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
	VIRTIO_NET_F_GUEST_HDRLEN

static unsigned int features[] = {
	VIRTNET_FEATURES,
};

static unsigned int features_legacy[] = {
	VIRTNET_FEATURES,
	VIRTIO_NET_F_GSO,
	VIRTIO_F_ANY_LAYOUT,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.validate = virtnet_validate,
	.probe = virtnet_probe,
	.remove = virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtnet_freeze,
	.restore = virtnet_restore,
#endif
};

static __init int virtio_net_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
				      virtnet_cpu_online,
				      virtnet_cpu_down_prep);
	if (ret < 0)
		goto out;
	virtionet_online = ret;
	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
				      NULL, virtnet_cpu_dead);
	if (ret)
		goto err_dead;
	ret = register_virtio_driver(&virtio_net_driver);
	if (ret)
		goto err_virtio;
	return 0;
err_virtio:
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
err_dead:
	cpuhp_remove_multi_state(virtionet_online);
out:
	return ret;
}
module_init(virtio_net_driver_init);

static __exit void virtio_net_driver_exit(void)
{
	unregister_virtio_driver(&virtio_net_driver);
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
	cpuhp_remove_multi_state(virtionet_online);
}
module_exit(virtio_net_driver_exit);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");