// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/dim.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>
#include <net/netdev_rx_queue.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN   128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX           BIT(0)
#define VIRTIO_XDP_REDIR        BIT(1)

#define VIRTIO_XDP_FLAG BIT(0)

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)
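
/* Illustrative sketch (assumes only the helpers DECLARE_EWMA() generates):
 * the declaration above produces struct ewma_pkt_len together with
 * ewma_pkt_len_init/_add/_read. With precision 0 and weight 64, each new
 * sample moves the average by roughly 1/64 of its distance from the old
 * average:
 *
 *	struct ewma_pkt_len avg;
 *
 *	ewma_pkt_len_init(&avg);
 *	ewma_pkt_len_add(&avg, 1500);		// feed one packet length
 *	len = ewma_pkt_len_read(&avg);		// read the smoothed value
 */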

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
        VIRTIO_NET_F_GUEST_TSO4,
        VIRTIO_NET_F_GUEST_TSO6,
        VIRTIO_NET_F_GUEST_ECN,
        VIRTIO_NET_F_GUEST_UFO,
        VIRTIO_NET_F_GUEST_CSUM,
        VIRTIO_NET_F_GUEST_USO4,
        VIRTIO_NET_F_GUEST_USO6,
        VIRTIO_NET_F_GUEST_HDRLEN
};

#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
                                (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
                                (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
                                (1ULL << VIRTIO_NET_F_GUEST_UFO)  | \
                                (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
                                (1ULL << VIRTIO_NET_F_GUEST_USO6))

struct virtnet_stat_desc {
        char desc[ETH_GSTRING_LEN];
        size_t offset;
};

struct virtnet_sq_stats {
        struct u64_stats_sync syncp;
        u64_stats_t packets;
        u64_stats_t bytes;
        u64_stats_t xdp_tx;
        u64_stats_t xdp_tx_drops;
        u64_stats_t kicks;
        u64_stats_t tx_timeouts;
};

struct virtnet_rq_stats {
        struct u64_stats_sync syncp;
        u64_stats_t packets;
        u64_stats_t bytes;
        u64_stats_t drops;
        u64_stats_t xdp_packets;
        u64_stats_t xdp_tx;
        u64_stats_t xdp_redirects;
        u64_stats_t xdp_drops;
        u64_stats_t kicks;
};

#define VIRTNET_SQ_STAT(m)      offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)      offsetof(struct virtnet_rq_stats, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
        { "packets",            VIRTNET_SQ_STAT(packets) },
        { "bytes",              VIRTNET_SQ_STAT(bytes) },
        { "xdp_tx",             VIRTNET_SQ_STAT(xdp_tx) },
        { "xdp_tx_drops",       VIRTNET_SQ_STAT(xdp_tx_drops) },
        { "kicks",              VIRTNET_SQ_STAT(kicks) },
        { "tx_timeouts",        VIRTNET_SQ_STAT(tx_timeouts) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
        { "packets",            VIRTNET_RQ_STAT(packets) },
        { "bytes",              VIRTNET_RQ_STAT(bytes) },
        { "drops",              VIRTNET_RQ_STAT(drops) },
        { "xdp_packets",        VIRTNET_RQ_STAT(xdp_packets) },
        { "xdp_tx",             VIRTNET_RQ_STAT(xdp_tx) },
        { "xdp_redirects",      VIRTNET_RQ_STAT(xdp_redirects) },
        { "xdp_drops",          VIRTNET_RQ_STAT(xdp_drops) },
        { "kicks",              VIRTNET_RQ_STAT(kicks) },
};

#define VIRTNET_SQ_STATS_LEN    ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN    ARRAY_SIZE(virtnet_rq_stats_desc)

struct virtnet_interrupt_coalesce {
        u32 max_packets;
        u32 max_usecs;
};

/* The DMA information for a page of receive buffers, stored at the start of
 * the page.
 */
struct virtnet_rq_dma {
        dma_addr_t addr;
        u32 ref;
        u16 len;
        u16 need_sync;
};

/* Internal representation of a send virtqueue */
struct send_queue {
        /* Virtqueue associated with this send_queue */
        struct virtqueue *vq;

        /* TX: fragments + linear part + virtio header */
        struct scatterlist sg[MAX_SKB_FRAGS + 2];

        /* Name of the send queue: output.$index */
        char name[16];

        struct virtnet_sq_stats stats;

        struct virtnet_interrupt_coalesce intr_coal;

        struct napi_struct napi;

        /* Record whether sq is in reset state. */
        bool reset;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
        /* Virtqueue associated with this receive_queue */
        struct virtqueue *vq;

        struct napi_struct napi;

        struct bpf_prog __rcu *xdp_prog;

        struct virtnet_rq_stats stats;

        /* The number of rx notifications */
        u16 calls;

        /* Is dynamic interrupt moderation enabled? */
        bool dim_enabled;

        /* Dynamic Interrupt Moderation */
        struct dim dim;

        u32 packets_in_napi;

        struct virtnet_interrupt_coalesce intr_coal;

        /* Chain pages by the private ptr. */
        struct page *pages;

        /* Average packet length for mergeable receive buffers. */
        struct ewma_pkt_len mrg_avg_pkt_len;

        /* Page frag for packet buffer allocation. */
        struct page_frag alloc_frag;

        /* RX: fragments + linear part + virtio header */
        struct scatterlist sg[MAX_SKB_FRAGS + 2];

        /* Min single buffer size for mergeable buffers case. */
        unsigned int min_buf_len;

        /* Name of this receive queue: input.$index */
        char name[16];

        struct xdp_rxq_info xdp_rxq;

        /* Record the last dma info, to be freed after a new page is allocated. */
        struct virtnet_rq_dma *last_dma;

        /* The driver does the DMA mapping itself. */
        bool do_dma;
};

/* This structure can contain an rss message with the maximum settings for the
 * indirection table and key size. Note that the default structure describing
 * the RSS configuration, virtio_net_rss_config, contains the same info but
 * can't hold the table values.
 * In any case, the structure is passed to the virtio hw through sg_buf split
 * into parts, because the table sizes may differ according to the device
 * configuration.
 */
#define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
#define VIRTIO_NET_RSS_MAX_TABLE_LEN    128
struct virtio_net_ctrl_rss {
        u32 hash_types;
        u16 indirection_table_mask;
        u16 unclassified_queue;
        u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
        u16 max_tx_vq;
        u8 hash_key_length;
        u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
};

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
        struct virtio_net_ctrl_hdr hdr;
        virtio_net_ctrl_ack status;
        struct virtio_net_ctrl_mq mq;
        u8 promisc;
        u8 allmulti;
        __virtio16 vid;
        __virtio64 offloads;
        struct virtio_net_ctrl_rss rss;
        struct virtio_net_ctrl_coal_tx coal_tx;
        struct virtio_net_ctrl_coal_rx coal_rx;
        struct virtio_net_ctrl_coal_vq coal_vq;
};

struct virtnet_info {
        struct virtio_device *vdev;
        struct virtqueue *cvq;
        struct net_device *dev;
        struct send_queue *sq;
        struct receive_queue *rq;
        unsigned int status;

        /* Max # of queue pairs supported by the device */
        u16 max_queue_pairs;

        /* # of queue pairs currently used by the driver */
        u16 curr_queue_pairs;

        /* # of XDP queue pairs currently used by the driver */
        u16 xdp_queue_pairs;

        /* xdp_queue_pairs may be 0 even when XDP is loaded, so track XDP state separately. */
        bool xdp_enabled;

        /* I like... big packets and I cannot lie! */
        bool big_packets;

        /* number of sg entries allocated for big packets */
        unsigned int big_packets_num_skbfrags;

        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;

        /* Host supports rss and/or hash report */
        bool has_rss;
        bool has_rss_hash_report;
        u8 rss_key_size;
        u16 rss_indir_table_size;
        u32 rss_hash_types_supported;
        u32 rss_hash_types_saved;

        /* Has control virtqueue */
        bool has_cvq;

        /* Host can handle any s/g split between our header and packet data */
        bool any_header_sg;

        /* Packet virtio header size */
        u8 hdr_len;

        /* Work struct for delayed refilling if we run low on memory. */
        struct delayed_work refill;

        /* Is delayed refill enabled? */
        bool refill_enabled;

        /* The lock to synchronize the access to refill_enabled */
        spinlock_t refill_lock;

        /* Work struct for config space updates */
        struct work_struct config_work;

        /* Is the affinity hint set for the virtqueues? */
        bool affinity_hint_set;

        /* CPU hotplug instances for online & dead */
        struct hlist_node node;
        struct hlist_node node_dead;

        struct control_buf *ctrl;

        /* Ethtool settings */
        u8 duplex;
        u32 speed;

        /* Is rx dynamic interrupt moderation enabled? */
        bool rx_dim_enabled;

        /* Interrupt coalescing settings */
        struct virtnet_interrupt_coalesce intr_coal_tx;
        struct virtnet_interrupt_coalesce intr_coal_rx;

        unsigned long guest_offloads;
        unsigned long guest_offloads_capable;

        /* failover when STANDBY feature enabled */
        struct failover *failover;
};

struct padded_vnet_hdr {
        struct virtio_net_hdr_v1_hash hdr;
        /*
         * hdr is in a separate sg buffer, and the data sg buffer shares the
         * same page with this header sg. This padding makes the next sg
         * 16-byte aligned after the header.
         */
        char padding[12];
};
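
/* Worked size check (a sketch; assumes struct virtio_net_hdr_v1_hash is
 * 20 bytes, per its definition in <linux/virtio_net.h>): 20 bytes of header
 * plus 12 bytes of padding give 32, a multiple of 16, which is what keeps
 * the following data sg 16-byte aligned. In code form:
 *
 *	BUILD_BUG_ON(sizeof(struct padded_vnet_hdr) % 16);	// 20 + 12 == 32
 */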

struct virtio_net_common_hdr {
        union {
                struct virtio_net_hdr hdr;
                struct virtio_net_hdr_mrg_rxbuf mrg_hdr;
                struct virtio_net_hdr_v1_hash hash_v1_hdr;
        };
};

static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);

static bool is_xdp_frame(void *ptr)
{
        return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
        return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
        return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
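
/* Round-trip sketch of the tagging above (relies on xdp_frame pointers
 * being at least 2-byte aligned, so bit 0 is free to carry the tag):
 *
 *	void *token = xdp_to_ptr(xdpf);		// bit 0 set
 *	if (is_xdp_frame(token))
 *		xdpf = ptr_to_xdp(token);	// bit 0 cleared again
 *
 * sk_buff pointers are queued untagged, so bit 0 distinguishes the two
 * kinds of virtqueue tokens.
 */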

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
        return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
        return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
        return vq->index / 2;
}

static int rxq2vq(int rxq)
{
        return rxq * 2;
}
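
/* Worked example of the mapping (a device with 2 queue pairs plus a
 * control vq):
 *
 *	vq index:  0    1    2    3    4
 *	role:      rx0  tx0  rx1  tx1  cvq
 *
 * so txq2vq(1) == 3, vq2txq() of vq index 3 == 1, and rxq2vq(1) == 2.
 */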

static inline struct virtio_net_common_hdr *
skb_vnet_common_hdr(struct sk_buff *skb)
{
        return (struct virtio_net_common_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
        struct page *end;

        /* Find end of list, sew whole thing into vi->rq.pages. */
        for (end = page; end->private; end = (struct page *)end->private);
        end->private = (unsigned long)rq->pages;
        rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
        struct page *p = rq->pages;

        if (p) {
                rq->pages = (struct page *)p->private;
                /* clear private here, it is used to chain pages */
                p->private = 0;
        } else
                p = alloc_page(gfp_mask);
        return p;
}
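
/* Illustration of the chain the two helpers above maintain:
 *
 *	rq->pages -> pageA -> pageB -> NULL
 *	             (each link stored in page->private)
 *
 * give_pages() splices a returned chain onto the front; get_a_page()
 * pops from the front and falls back to alloc_page() when the list is
 * empty.
 */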

static void virtnet_rq_free_buf(struct virtnet_info *vi,
                                struct receive_queue *rq, void *buf)
{
        if (vi->mergeable_rx_bufs)
                put_page(virt_to_head_page(buf));
        else if (vi->big_packets)
                give_pages(rq, buf);
        else
                put_page(virt_to_head_page(buf));
}

static void enable_delayed_refill(struct virtnet_info *vi)
{
        spin_lock_bh(&vi->refill_lock);
        vi->refill_enabled = true;
        spin_unlock_bh(&vi->refill_lock);
}

static void disable_delayed_refill(struct virtnet_info *vi)
{
        spin_lock_bh(&vi->refill_lock);
        vi->refill_enabled = false;
        spin_unlock_bh(&vi->refill_lock);
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
                                    struct virtqueue *vq)
{
        if (napi_schedule_prep(napi)) {
                virtqueue_disable_cb(vq);
                __napi_schedule(napi);
        }
}

static bool virtqueue_napi_complete(struct napi_struct *napi,
                                    struct virtqueue *vq, int processed)
{
        int opaque;

        opaque = virtqueue_enable_cb_prepare(vq);
        if (napi_complete_done(napi, processed)) {
                if (unlikely(virtqueue_poll(vq, opaque)))
                        virtqueue_napi_schedule(napi, vq);
                else
                        return true;
        } else {
                virtqueue_disable_cb(vq);
        }

        return false;
}

static void skb_xmit_done(struct virtqueue *vq)
{
        struct virtnet_info *vi = vq->vdev->priv;
        struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

        /* Suppress further interrupts. */
        virtqueue_disable_cb(vq);

        if (napi->weight)
                virtqueue_napi_schedule(napi, vq);
        else
                /* We were probably waiting for more output buffers. */
                netif_wake_subqueue(vi->dev, vq2txq(vq));
}

#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
                                  unsigned int headroom)
{
        return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
        return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
        return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}
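
/* Worked example of the ctx packing above: truesize lives in bits 0..21,
 * headroom in the bits above. For truesize 1536 and headroom 256:
 *
 *	void *ctx = mergeable_len_to_ctx(1536, 256);
 *
 *	mergeable_ctx_to_truesize(ctx);	// == 1536
 *	mergeable_ctx_to_headroom(ctx);	// == 256
 */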

static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
                                         unsigned int headroom,
                                         unsigned int len)
{
        struct sk_buff *skb;

        skb = build_skb(buf, buflen);
        if (unlikely(!skb))
                return NULL;

        skb_reserve(skb, headroom);
        skb_put(skb, len);

        return skb;
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
                                   struct receive_queue *rq,
                                   struct page *page, unsigned int offset,
                                   unsigned int len, unsigned int truesize,
                                   unsigned int headroom)
{
        struct sk_buff *skb;
        struct virtio_net_common_hdr *hdr;
        unsigned int copy, hdr_len, hdr_padded_len;
        struct page *page_to_free = NULL;
        int tailroom, shinfo_size;
        char *p, *hdr_p, *buf;

        p = page_address(page) + offset;
        hdr_p = p;

        hdr_len = vi->hdr_len;
        if (vi->mergeable_rx_bufs)
                hdr_padded_len = hdr_len;
        else
                hdr_padded_len = sizeof(struct padded_vnet_hdr);

        buf = p - headroom;
        len -= hdr_len;
        offset += hdr_padded_len;
        p += hdr_padded_len;
        tailroom = truesize - headroom - hdr_padded_len - len;

        shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        /* Build the skb around the buffer instead of copying, when the packet
         * is large and there is enough tailroom for the shared info.
         */
        if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
                skb = virtnet_build_skb(buf, truesize, p - buf, len);
                if (unlikely(!skb))
                        return NULL;

                page = (struct page *)page->private;
                if (page)
                        give_pages(rq, page);
                goto ok;
        }

        /* copy small packet so we can reuse these pages for small data */
        skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
        if (unlikely(!skb))
                return NULL;

        /* Copy the whole frame if it fits skb->head, otherwise
         * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
         */
        if (len <= skb_tailroom(skb))
                copy = len;
        else
                copy = ETH_HLEN;
        skb_put_data(skb, p, copy);

        len -= copy;
        offset += copy;

        if (vi->mergeable_rx_bufs) {
                if (len)
                        skb_add_rx_frag(skb, 0, page, offset, len, truesize);
                else
                        page_to_free = page;
                goto ok;
        }

        /*
         * Verify that we can indeed put this data into a skb.
         * This is here to handle cases when the device erroneously
         * tries to receive more than is possible. This is usually
         * the case of a broken device.
         */
        if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
                net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
                dev_kfree_skb(skb);
                return NULL;
        }
        BUG_ON(offset >= PAGE_SIZE);
        while (len) {
                unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
                                frag_size, truesize);
                len -= frag_size;
                page = (struct page *)page->private;
                offset = 0;
        }

        if (page)
                give_pages(rq, page);

ok:
        hdr = skb_vnet_common_hdr(skb);
        memcpy(hdr, hdr_p, hdr_len);
        if (page_to_free)
                put_page(page_to_free);

        return skb;
}

static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
{
        struct page *page = virt_to_head_page(buf);
        struct virtnet_rq_dma *dma;
        void *head;
        int offset;

        head = page_address(page);

        dma = head;

        --dma->ref;

        if (dma->need_sync && len) {
                offset = buf - (head + sizeof(*dma));

                virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
                                                        offset, len,
                                                        DMA_FROM_DEVICE);
        }

        if (dma->ref)
                return;

        virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
                                         DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
        put_page(page);
}

static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
{
        void *buf;

        buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
        if (buf && rq->do_dma)
                virtnet_rq_unmap(rq, buf, *len);

        return buf;
}

static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
{
        struct virtnet_rq_dma *dma;
        dma_addr_t addr;
        u32 offset;
        void *head;

        if (!rq->do_dma) {
                sg_init_one(rq->sg, buf, len);
                return;
        }

        head = page_address(rq->alloc_frag.page);

        offset = buf - head;

        dma = head;

        addr = dma->addr - sizeof(*dma) + offset;

        sg_init_table(rq->sg, 1);
        rq->sg[0].dma_address = addr;
        rq->sg[0].length = len;
}

static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
{
        struct page_frag *alloc_frag = &rq->alloc_frag;
        struct virtnet_rq_dma *dma;
        void *buf, *head;
        dma_addr_t addr;

        if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
                return NULL;

        head = page_address(alloc_frag->page);

        if (rq->do_dma) {
                dma = head;

                /* new page */
                if (!alloc_frag->offset) {
                        if (rq->last_dma) {
                                /* Now that the new page is allocated, the last
                                 * dma will not be used. So it can be unmapped
                                 * if its ref is 0.
                                 */
                                virtnet_rq_unmap(rq, rq->last_dma, 0);
                                rq->last_dma = NULL;
                        }

                        dma->len = alloc_frag->size - sizeof(*dma);

                        addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
                                                              dma->len, DMA_FROM_DEVICE, 0);
                        if (virtqueue_dma_mapping_error(rq->vq, addr))
                                return NULL;

                        dma->addr = addr;
                        dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);

                        /* Add a reference to dma to prevent the entire dma from
                         * being released during error handling. This reference
                         * will be dropped once the pages are no longer used.
                         */
                        get_page(alloc_frag->page);
                        dma->ref = 1;
                        alloc_frag->offset = sizeof(*dma);

                        rq->last_dma = dma;
                }

                ++dma->ref;
        }

        buf = head + alloc_frag->offset;

        get_page(alloc_frag->page);
        alloc_frag->offset += size;

        return buf;
}
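
/* Resulting page layout in premapped (do_dma) mode, as built above:
 *
 *	+-----------------------+--------+--------+-----
 *	| struct virtnet_rq_dma | buf 0  | buf 1  | ...
 *	+-----------------------+--------+--------+-----
 *	^ page start; the struct records the page's DMA mapping
 *
 * dma->ref counts the buffers still outstanding; virtnet_rq_unmap()
 * tears the mapping down once the count drops to zero.
 */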

static void virtnet_rq_set_premapped(struct virtnet_info *vi)
{
        int i;

        /* disable for big mode */
        if (!vi->mergeable_rx_bufs && vi->big_packets)
                return;

        for (i = 0; i < vi->max_queue_pairs; i++) {
                if (virtqueue_set_dma_premapped(vi->rq[i].vq))
                        continue;

                vi->rq[i].do_dma = true;
        }
}

static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
{
        struct virtnet_info *vi = vq->vdev->priv;
        struct receive_queue *rq;
        int i = vq2rxq(vq);

        rq = &vi->rq[i];

        if (rq->do_dma)
                virtnet_rq_unmap(rq, buf, 0);

        virtnet_rq_free_buf(vi, rq, buf);
}

static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
        unsigned int len;
        unsigned int packets = 0;
        unsigned int bytes = 0;
        void *ptr;

        while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
                if (likely(!is_xdp_frame(ptr))) {
                        struct sk_buff *skb = ptr;

                        pr_debug("Sent skb %p\n", skb);

                        bytes += skb->len;
                        napi_consume_skb(skb, in_napi);
                } else {
                        struct xdp_frame *frame = ptr_to_xdp(ptr);

                        bytes += xdp_get_frame_len(frame);
                        xdp_return_frame(frame);
                }
                packets++;
        }

        /* Avoid the stats update overhead when no packets have been
         * processed; this happens when we are called speculatively from
         * start_xmit.
         */
        if (!packets)
                return;

        u64_stats_update_begin(&sq->stats.syncp);
        u64_stats_add(&sq->stats.bytes, bytes);
        u64_stats_add(&sq->stats.packets, packets);
        u64_stats_update_end(&sq->stats.syncp);
}

static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
        if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
                return false;
        else if (q < vi->curr_queue_pairs)
                return true;
        else
                return false;
}

static void check_sq_full_and_disable(struct virtnet_info *vi,
                                      struct net_device *dev,
                                      struct send_queue *sq)
{
        bool use_napi = sq->napi.weight;
        int qnum;

        qnum = sq - vi->sq;

        /* If running out of space, stop queue to avoid getting packets that we
         * are then unable to transmit.
         * An alternative would be to force queuing layer to requeue the skb by
         * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
         * returned in a normal path of operation: it means that driver is not
         * maintaining the TX queue stop/start state properly, and causes
         * the stack to do a non-trivial amount of useless work.
         * Since most packets only take 1 or 2 ring slots, stopping the queue
         * early means 16 slots are typically wasted.
         */
        if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
                netif_stop_subqueue(dev, qnum);
                if (use_napi) {
                        if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
                                virtqueue_napi_schedule(&sq->napi, sq->vq);
                } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
                        /* More just got used, free them then recheck. */
                        free_old_xmit_skbs(sq, false);
                        if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
                                netif_start_subqueue(dev, qnum);
                                virtqueue_disable_cb(sq->vq);
                        }
                }
        }
}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
                                   struct send_queue *sq,
                                   struct xdp_frame *xdpf)
{
        struct virtio_net_hdr_mrg_rxbuf *hdr;
        struct skb_shared_info *shinfo;
        u8 nr_frags = 0;
        int err, i;

        if (unlikely(xdpf->headroom < vi->hdr_len))
                return -EOVERFLOW;

        if (unlikely(xdp_frame_has_frags(xdpf))) {
                shinfo = xdp_get_shared_info_from_frame(xdpf);
                nr_frags = shinfo->nr_frags;
        }

        /* In the wrapping function virtnet_xdp_xmit(), we need to free
         * up the pending old buffers, where we need to calculate the
         * position of skb_shared_info in xdp_get_frame_len() and
         * xdp_return_frame(), which depend on xdpf->data and
         * xdpf->headroom. Therefore, we need to update the value of
         * headroom synchronously here.
         */
        xdpf->headroom -= vi->hdr_len;
        xdpf->data -= vi->hdr_len;
        /* Zero header and leave csum up to XDP layers */
        hdr = xdpf->data;
        memset(hdr, 0, vi->hdr_len);
        xdpf->len   += vi->hdr_len;

        sg_init_table(sq->sg, nr_frags + 1);
        sg_set_buf(sq->sg, xdpf->data, xdpf->len);
        for (i = 0; i < nr_frags; i++) {
                skb_frag_t *frag = &shinfo->frags[i];

                sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
                            skb_frag_size(frag), skb_frag_off(frag));
        }

        err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
                                   xdp_to_ptr(xdpf), GFP_ATOMIC);
        if (unlikely(err))
                return -ENOSPC; /* Caller handles free/refcnt */

        return 0;
}

/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
 * the current cpu, so it does not need to be locked.
 *
 * Here we use a macro instead of inline functions because we have to deal with
 * three issues at the same time: 1. the choice of sq, 2. deciding whether to
 * lock/unlock the txq, and 3. making sparse happy. It is difficult for two
 * inline functions to solve these three problems at the same time.
 */
#define virtnet_xdp_get_sq(vi) ({                                       \
        int cpu = smp_processor_id();                                   \
        struct netdev_queue *txq;                                       \
        typeof(vi) v = (vi);                                            \
        unsigned int qp;                                                \
                                                                        \
        if (v->curr_queue_pairs > nr_cpu_ids) {                         \
                qp = v->curr_queue_pairs - v->xdp_queue_pairs;          \
                qp += cpu;                                              \
                txq = netdev_get_tx_queue(v->dev, qp);                  \
                __netif_tx_acquire(txq);                                \
        } else {                                                        \
                qp = cpu % v->curr_queue_pairs;                         \
                txq = netdev_get_tx_queue(v->dev, qp);                  \
                __netif_tx_lock(txq, cpu);                              \
        }                                                               \
        v->sq + qp;                                                     \
})

#define virtnet_xdp_put_sq(vi, q) {                                     \
        struct netdev_queue *txq;                                       \
        typeof(vi) v = (vi);                                            \
                                                                        \
        txq = netdev_get_tx_queue(v->dev, (q) - v->sq);                 \
        if (v->curr_queue_pairs > nr_cpu_ids)                           \
                __netif_tx_release(txq);                                \
        else                                                            \
                __netif_tx_unlock(txq);                                 \
}
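
/* Typical pairing of the two macros above (this is how virtnet_xdp_xmit()
 * below uses them):
 *
 *	struct send_queue *sq;
 *
 *	sq = virtnet_xdp_get_sq(vi);	// pick the sq, lock txq if shared
 *	...queue XDP frames on sq...
 *	virtnet_xdp_put_sq(vi, sq);	// release the matching txq
 */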

static int virtnet_xdp_xmit(struct net_device *dev,
                            int n, struct xdp_frame **frames, u32 flags)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct receive_queue *rq = vi->rq;
        struct bpf_prog *xdp_prog;
        struct send_queue *sq;
        unsigned int len;
        int packets = 0;
        int bytes = 0;
        int nxmit = 0;
        int kicks = 0;
        void *ptr;
        int ret;
        int i;

        /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
         * indicates XDP resources have been successfully allocated.
         */
        xdp_prog = rcu_access_pointer(rq->xdp_prog);
        if (!xdp_prog)
                return -ENXIO;

        sq = virtnet_xdp_get_sq(vi);

        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
                ret = -EINVAL;
                goto out;
        }

        /* Free up any pending old buffers before queueing new ones. */
        while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
                if (likely(is_xdp_frame(ptr))) {
                        struct xdp_frame *frame = ptr_to_xdp(ptr);

                        bytes += xdp_get_frame_len(frame);
                        xdp_return_frame(frame);
                } else {
                        struct sk_buff *skb = ptr;

                        bytes += skb->len;
                        napi_consume_skb(skb, false);
                }
                packets++;
        }

        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];

                if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
                        break;
                nxmit++;
        }
        ret = nxmit;

        if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
                check_sq_full_and_disable(vi, dev, sq);

        if (flags & XDP_XMIT_FLUSH) {
                if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
                        kicks = 1;
        }
out:
        u64_stats_update_begin(&sq->stats.syncp);
        u64_stats_add(&sq->stats.bytes, bytes);
        u64_stats_add(&sq->stats.packets, packets);
        u64_stats_add(&sq->stats.xdp_tx, n);
        u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
        u64_stats_add(&sq->stats.kicks, kicks);
        u64_stats_update_end(&sq->stats.syncp);

        virtnet_xdp_put_sq(vi, sq);
        return ret;
}

static void put_xdp_frags(struct xdp_buff *xdp)
{
        struct skb_shared_info *shinfo;
        struct page *xdp_page;
        int i;

        if (xdp_buff_has_frags(xdp)) {
                shinfo = xdp_get_shared_info_from_buff(xdp);
                for (i = 0; i < shinfo->nr_frags; i++) {
                        xdp_page = skb_frag_page(&shinfo->frags[i]);
                        put_page(xdp_page);
                }
        }
}

static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
                               struct net_device *dev,
                               unsigned int *xdp_xmit,
                               struct virtnet_rq_stats *stats)
{
        struct xdp_frame *xdpf;
        int err;
        u32 act;

        act = bpf_prog_run_xdp(xdp_prog, xdp);
        u64_stats_inc(&stats->xdp_packets);

        switch (act) {
        case XDP_PASS:
                return act;

        case XDP_TX:
                u64_stats_inc(&stats->xdp_tx);
                xdpf = xdp_convert_buff_to_frame(xdp);
                if (unlikely(!xdpf)) {
                        netdev_dbg(dev, "convert buff to frame failed for xdp\n");
                        return XDP_DROP;
                }

                err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
                if (unlikely(!err)) {
                        xdp_return_frame_rx_napi(xdpf);
                } else if (unlikely(err < 0)) {
                        trace_xdp_exception(dev, xdp_prog, act);
                        return XDP_DROP;
                }
                *xdp_xmit |= VIRTIO_XDP_TX;
                return act;

        case XDP_REDIRECT:
                u64_stats_inc(&stats->xdp_redirects);
                err = xdp_do_redirect(dev, xdp, xdp_prog);
                if (err)
                        return XDP_DROP;

                *xdp_xmit |= VIRTIO_XDP_REDIR;
                return act;

        default:
                bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
                fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(dev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
                return XDP_DROP;
        }
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
        return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
}

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that
 * we hit right after XDP is enabled and until the queue is refilled
 * with large buffers with sufficient headroom - so it should affect
 * at most queue size packets.
 * Afterwards, the conditions to enable
 * XDP should preclude the underlying device from sending packets
 * across multiple buffers (num_buf > 1), and we make sure buffers
 * have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
                                       int *num_buf,
                                       struct page *p,
                                       int offset,
                                       int page_off,
                                       unsigned int *len)
{
        int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        struct page *page;

        if (page_off + *len + tailroom > PAGE_SIZE)
                return NULL;

        page = alloc_page(GFP_ATOMIC);
        if (!page)
                return NULL;

        memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
        page_off += *len;

        while (--*num_buf) {
                unsigned int buflen;
                void *buf;
                int off;

                buf = virtnet_rq_get_buf(rq, &buflen, NULL);
                if (unlikely(!buf))
                        goto err_buf;

                p = virt_to_head_page(buf);
                off = buf - page_address(p);

                /* guard against a misconfigured or uncooperative backend that
                 * is sending packets larger than the MTU.
                 */
                if ((page_off + buflen + tailroom) > PAGE_SIZE) {
                        put_page(p);
                        goto err_buf;
                }

                memcpy(page_address(page) + page_off,
                       page_address(p) + off, buflen);
                page_off += buflen;
                put_page(p);
        }

        /* Headroom does not contribute to packet length */
        *len = page_off - VIRTIO_XDP_HEADROOM;
        return page;
err_buf:
        __free_pages(page, 0);
        return NULL;
}

static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
                                               unsigned int xdp_headroom,
                                               void *buf,
                                               unsigned int len)
{
        unsigned int header_offset;
        unsigned int headroom;
        unsigned int buflen;
        struct sk_buff *skb;

        header_offset = VIRTNET_RX_PAD + xdp_headroom;
        headroom = vi->hdr_len + header_offset;
        buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        skb = virtnet_build_skb(buf, buflen, headroom, len);
        if (unlikely(!skb))
                return NULL;

        buf += header_offset;
        memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);

        return skb;
}

static struct sk_buff *receive_small_xdp(struct net_device *dev,
                                         struct virtnet_info *vi,
                                         struct receive_queue *rq,
                                         struct bpf_prog *xdp_prog,
                                         void *buf,
                                         unsigned int xdp_headroom,
                                         unsigned int len,
                                         unsigned int *xdp_xmit,
                                         struct virtnet_rq_stats *stats)
{
        unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
        unsigned int headroom = vi->hdr_len + header_offset;
        struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
        struct page *page = virt_to_head_page(buf);
        struct page *xdp_page;
        unsigned int buflen;
        struct xdp_buff xdp;
        struct sk_buff *skb;
        unsigned int metasize = 0;
        u32 act;

        if (unlikely(hdr->hdr.gso_type))
                goto err_xdp;

        buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
                int offset = buf - page_address(page) + header_offset;
                unsigned int tlen = len + vi->hdr_len;
                int num_buf = 1;

                xdp_headroom = virtnet_get_headroom(vi);
                header_offset = VIRTNET_RX_PAD + xdp_headroom;
                headroom = vi->hdr_len + header_offset;
                buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                xdp_page = xdp_linearize_page(rq, &num_buf, page,
                                              offset, header_offset,
                                              &tlen);
                if (!xdp_page)
                        goto err_xdp;

                buf = page_address(xdp_page);
                put_page(page);
                page = xdp_page;
        }

        xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
        xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
                         xdp_headroom, len, true);

        act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);

        switch (act) {
        case XDP_PASS:
                /* Recalculate length in case bpf program changed it */
                len = xdp.data_end - xdp.data;
                metasize = xdp.data - xdp.data_meta;
                break;

        case XDP_TX:
        case XDP_REDIRECT:
                goto xdp_xmit;

        default:
                goto err_xdp;
        }

        skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
        if (unlikely(!skb))
                goto err;

        if (metasize)
                skb_metadata_set(skb, metasize);

        return skb;

err_xdp:
        u64_stats_inc(&stats->xdp_drops);
err:
        u64_stats_inc(&stats->drops);
        put_page(page);
xdp_xmit:
        return NULL;
}

static struct sk_buff *receive_small(struct net_device *dev,
                                     struct virtnet_info *vi,
                                     struct receive_queue *rq,
                                     void *buf, void *ctx,
                                     unsigned int len,
                                     unsigned int *xdp_xmit,
                                     struct virtnet_rq_stats *stats)
{
        unsigned int xdp_headroom = (unsigned long)ctx;
        struct page *page = virt_to_head_page(buf);
        struct sk_buff *skb;

        len -= vi->hdr_len;
        u64_stats_add(&stats->bytes, len);

        if (unlikely(len > GOOD_PACKET_LEN)) {
                pr_debug("%s: rx error: len %u exceeds max size %d\n",
                         dev->name, len, GOOD_PACKET_LEN);
                DEV_STATS_INC(dev, rx_length_errors);
                goto err;
        }

        if (unlikely(vi->xdp_enabled)) {
                struct bpf_prog *xdp_prog;

                rcu_read_lock();
                xdp_prog = rcu_dereference(rq->xdp_prog);
                if (xdp_prog) {
                        skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
                                                xdp_headroom, len, xdp_xmit,
                                                stats);
                        rcu_read_unlock();
                        return skb;
                }
                rcu_read_unlock();
        }

        skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
        if (likely(skb))
                return skb;

err:
        u64_stats_inc(&stats->drops);
        put_page(page);
        return NULL;
}

static struct sk_buff *receive_big(struct net_device *dev,
                                   struct virtnet_info *vi,
                                   struct receive_queue *rq,
                                   void *buf,
                                   unsigned int len,
                                   struct virtnet_rq_stats *stats)
{
        struct page *page = buf;
        struct sk_buff *skb =
                page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);

        u64_stats_add(&stats->bytes, len - vi->hdr_len);
        if (unlikely(!skb))
                goto err;

        return skb;

err:
        u64_stats_inc(&stats->drops);
        give_pages(rq, page);
        return NULL;
}

static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
                               struct net_device *dev,
                               struct virtnet_rq_stats *stats)
{
        struct page *page;
        void *buf;
        u32 len;

        while (num_buf-- > 1) {
                buf = virtnet_rq_get_buf(rq, &len, NULL);
                if (unlikely(!buf)) {
                        pr_debug("%s: rx error: %d buffers missing\n",
                                 dev->name, num_buf);
                        DEV_STATS_INC(dev, rx_length_errors);
                        break;
                }
                u64_stats_add(&stats->bytes, len);
                page = virt_to_head_page(buf);
                put_page(page);
        }
}

/* Why not use xdp_build_skb_from_frame()?
 * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
 * virtio-net there are 2 points that do not match its requirements:
 *  1. The size of the prefilled buffer is not fixed before xdp is set.
 *  2. xdp_build_skb_from_frame() does more checks that we don't need,
 *     like eth_type_trans() (which virtio-net does in receive_buf()).
 */
static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
                                               struct virtnet_info *vi,
                                               struct xdp_buff *xdp,
                                               unsigned int xdp_frags_truesz)
{
        struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
        unsigned int headroom, data_len;
        struct sk_buff *skb;
        int metasize;
        u8 nr_frags;

        if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
                pr_debug("Error building skb as missing reserved tailroom for xdp\n");
                return NULL;
        }

        if (unlikely(xdp_buff_has_frags(xdp)))
                nr_frags = sinfo->nr_frags;

        skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
        if (unlikely(!skb))
                return NULL;

        headroom = xdp->data - xdp->data_hard_start;
        data_len = xdp->data_end - xdp->data;
        skb_reserve(skb, headroom);
        __skb_put(skb, data_len);

        metasize = xdp->data - xdp->data_meta;
        metasize = metasize > 0 ? metasize : 0;
        if (metasize)
                skb_metadata_set(skb, metasize);

        if (unlikely(xdp_buff_has_frags(xdp)))
                xdp_update_skb_shared_info(skb, nr_frags,
                                           sinfo->xdp_frags_size,
                                           xdp_frags_truesz,
                                           xdp_buff_is_frag_pfmemalloc(xdp));

        return skb;
}

/* TODO: build xdp in big mode */
static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
                                      struct virtnet_info *vi,
                                      struct receive_queue *rq,
                                      struct xdp_buff *xdp,
                                      void *buf,
                                      unsigned int len,
                                      unsigned int frame_sz,
                                      int *num_buf,
                                      unsigned int *xdp_frags_truesize,
                                      struct virtnet_rq_stats *stats)
{
        struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
        unsigned int headroom, tailroom, room;
        unsigned int truesize, cur_frag_size;
        struct skb_shared_info *shinfo;
        unsigned int xdp_frags_truesz = 0;
        struct page *page;
        skb_frag_t *frag;
        int offset;
        void *ctx;

        xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
        xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
                         VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);

        if (!*num_buf)
                return 0;

        if (*num_buf > 1) {
                /* If we want to build multi-buffer xdp, we need
                 * to specify that the flags of xdp_buff have the
                 * XDP_FLAGS_HAS_FRAGS bit.
                 */
                if (!xdp_buff_has_frags(xdp))
                        xdp_buff_set_frags_flag(xdp);

                shinfo = xdp_get_shared_info_from_buff(xdp);
                shinfo->nr_frags = 0;
                shinfo->xdp_frags_size = 0;
        }

        if (*num_buf > MAX_SKB_FRAGS + 1)
                return -EINVAL;

        while (--*num_buf > 0) {
                buf = virtnet_rq_get_buf(rq, &len, &ctx);
                if (unlikely(!buf)) {
                        pr_debug("%s: rx error: %d buffers out of %d missing\n",
                                 dev->name, *num_buf,
                                 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
                        DEV_STATS_INC(dev, rx_length_errors);
                        goto err;
                }

                u64_stats_add(&stats->bytes, len);
                page = virt_to_head_page(buf);
                offset = buf - page_address(page);

                truesize = mergeable_ctx_to_truesize(ctx);
                headroom = mergeable_ctx_to_headroom(ctx);
                tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
                room = SKB_DATA_ALIGN(headroom + tailroom);

                cur_frag_size = truesize;
                xdp_frags_truesz += cur_frag_size;
                if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
                        put_page(page);
                        pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
                                 dev->name, len, (unsigned long)(truesize - room));
                        DEV_STATS_INC(dev, rx_length_errors);
                        goto err;
                }

                frag = &shinfo->frags[shinfo->nr_frags++];
                skb_frag_fill_page_desc(frag, page, offset, len);
                if (page_is_pfmemalloc(page))
                        xdp_buff_set_frag_pfmemalloc(xdp);

                shinfo->xdp_frags_size += len;
        }

        *xdp_frags_truesize = xdp_frags_truesz;
        return 0;

err:
        put_xdp_frags(xdp);
        return -EINVAL;
}

1506 static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
1507                                    struct receive_queue *rq,
1508                                    struct bpf_prog *xdp_prog,
1509                                    void *ctx,
1510                                    unsigned int *frame_sz,
1511                                    int *num_buf,
1512                                    struct page **page,
1513                                    int offset,
1514                                    unsigned int *len,
1515                                    struct virtio_net_hdr_mrg_rxbuf *hdr)
1516 {
1517         unsigned int truesize = mergeable_ctx_to_truesize(ctx);
1518         unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1519         struct page *xdp_page;
1520         unsigned int xdp_room;
1521
1522         /* Transient failure which in theory could occur if
1523          * in-flight packets from before XDP was enabled reach
1524          * the receive path after XDP is loaded.
1525          */
1526         if (unlikely(hdr->hdr.gso_type))
1527                 return NULL;
1528
1529         /* The XDP core assumes the frag size is PAGE_SIZE, but buffers
1530          * with headroom may add a hole to the truesize, which
1531          * makes their length exceed PAGE_SIZE. So we disable the
1532          * hole mechanism for XDP. See add_recvbuf_mergeable().
1533          */
1534         *frame_sz = truesize;
1535
1536         if (likely(headroom >= virtnet_get_headroom(vi) &&
1537                    (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
1538                 return page_address(*page) + offset;
1539         }
1540
1541         /* This happens when the headroom is not enough because
1542          * the buffer was prefilled before XDP was set.
1543          * This should only happen for the first several packets.
1544          * In fact, vq reset can be used here to help us clean up
1545          * the prefilled buffers, but many existing devices do not
1546          * support it, and we don't want to bother users who are
1547          * using XDP normally.
1548          */
1549         if (!xdp_prog->aux->xdp_has_frags) {
1550                 /* linearize data for XDP */
1551                 xdp_page = xdp_linearize_page(rq, num_buf,
1552                                               *page, offset,
1553                                               VIRTIO_XDP_HEADROOM,
1554                                               len);
1555                 if (!xdp_page)
1556                         return NULL;
1557         } else {
1558                 xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
1559                                           sizeof(struct skb_shared_info));
1560                 if (*len + xdp_room > PAGE_SIZE)
1561                         return NULL;
1562
1563                 xdp_page = alloc_page(GFP_ATOMIC);
1564                 if (!xdp_page)
1565                         return NULL;
1566
1567                 memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
1568                        page_address(*page) + offset, *len);
1569         }
1570
1571         *frame_sz = PAGE_SIZE;
1572
1573         put_page(*page);
1574
1575         *page = xdp_page;
1576
1577         return page_address(*page) + VIRTIO_XDP_HEADROOM;
1578 }
1579
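/* XDP path for mergeable buffers: build a (possibly multi-frag)
 * xdp_buff, run the attached program, and either convert the result to
 * an skb on XDP_PASS or leave transmission/redirection to the handler.
 */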
1580 static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
1581                                              struct virtnet_info *vi,
1582                                              struct receive_queue *rq,
1583                                              struct bpf_prog *xdp_prog,
1584                                              void *buf,
1585                                              void *ctx,
1586                                              unsigned int len,
1587                                              unsigned int *xdp_xmit,
1588                                              struct virtnet_rq_stats *stats)
1589 {
1590         struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1591         int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1592         struct page *page = virt_to_head_page(buf);
1593         int offset = buf - page_address(page);
1594         unsigned int xdp_frags_truesz = 0;
1595         struct sk_buff *head_skb;
1596         unsigned int frame_sz;
1597         struct xdp_buff xdp;
1598         void *data;
1599         u32 act;
1600         int err;
1601
1602         data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
1603                                      offset, &len, hdr);
1604         if (unlikely(!data))
1605                 goto err_xdp;
1606
1607         err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
1608                                          &num_buf, &xdp_frags_truesz, stats);
1609         if (unlikely(err))
1610                 goto err_xdp;
1611
1612         act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1613
1614         switch (act) {
1615         case XDP_PASS:
1616                 head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
1617                 if (unlikely(!head_skb))
1618                         break;
1619                 return head_skb;
1620
1621         case XDP_TX:
1622         case XDP_REDIRECT:
1623                 return NULL;
1624
1625         default:
1626                 break;
1627         }
1628
1629         put_xdp_frags(&xdp);
1630
1631 err_xdp:
1632         put_page(page);
1633         mergeable_buf_free(rq, num_buf, dev, stats);
1634
1635         u64_stats_inc(&stats->xdp_drops);
1636         u64_stats_inc(&stats->drops);
1637         return NULL;
1638 }
1639
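/* Mergeable receive path: hand the packet to the XDP path if a program
 * is attached, otherwise gather the num_buffers descriptors into one
 * skb, chaining extra skbs via frag_list once MAX_SKB_FRAGS page
 * fragments are in use.
 */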
1640 static struct sk_buff *receive_mergeable(struct net_device *dev,
1641                                          struct virtnet_info *vi,
1642                                          struct receive_queue *rq,
1643                                          void *buf,
1644                                          void *ctx,
1645                                          unsigned int len,
1646                                          unsigned int *xdp_xmit,
1647                                          struct virtnet_rq_stats *stats)
1648 {
1649         struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1650         int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1651         struct page *page = virt_to_head_page(buf);
1652         int offset = buf - page_address(page);
1653         struct sk_buff *head_skb, *curr_skb;
1654         unsigned int truesize = mergeable_ctx_to_truesize(ctx);
1655         unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1656         unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1657         unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1658
1659         head_skb = NULL;
1660         u64_stats_add(&stats->bytes, len - vi->hdr_len);
1661
1662         if (unlikely(len > truesize - room)) {
1663                 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1664                          dev->name, len, (unsigned long)(truesize - room));
1665                 DEV_STATS_INC(dev, rx_length_errors);
1666                 goto err_skb;
1667         }
1668
1669         if (unlikely(vi->xdp_enabled)) {
1670                 struct bpf_prog *xdp_prog;
1671
1672                 rcu_read_lock();
1673                 xdp_prog = rcu_dereference(rq->xdp_prog);
1674                 if (xdp_prog) {
1675                         head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
1676                                                          len, xdp_xmit, stats);
1677                         rcu_read_unlock();
1678                         return head_skb;
1679                 }
1680                 rcu_read_unlock();
1681         }
1682
1683         head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
1684         curr_skb = head_skb;
1685
1686         if (unlikely(!curr_skb))
1687                 goto err_skb;
1688         while (--num_buf) {
1689                 int num_skb_frags;
1690
1691                 buf = virtnet_rq_get_buf(rq, &len, &ctx);
1692                 if (unlikely(!buf)) {
1693                         pr_debug("%s: rx error: %d buffers out of %d missing\n",
1694                                  dev->name, num_buf,
1695                                  virtio16_to_cpu(vi->vdev,
1696                                                  hdr->num_buffers));
1697                         DEV_STATS_INC(dev, rx_length_errors);
1698                         goto err_buf;
1699                 }
1700
1701                 u64_stats_add(&stats->bytes, len);
1702                 page = virt_to_head_page(buf);
1703
1704                 truesize = mergeable_ctx_to_truesize(ctx);
1705                 headroom = mergeable_ctx_to_headroom(ctx);
1706                 tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1707                 room = SKB_DATA_ALIGN(headroom + tailroom);
1708                 if (unlikely(len > truesize - room)) {
1709                         pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1710                                  dev->name, len, (unsigned long)(truesize - room));
1711                         DEV_STATS_INC(dev, rx_length_errors);
1712                         goto err_skb;
1713                 }
1714
1715                 num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
1716                 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
1717                         struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
1718
1719                         if (unlikely(!nskb))
1720                                 goto err_skb;
1721                         if (curr_skb == head_skb)
1722                                 skb_shinfo(curr_skb)->frag_list = nskb;
1723                         else
1724                                 curr_skb->next = nskb;
1725                         curr_skb = nskb;
1726                         head_skb->truesize += nskb->truesize;
1727                         num_skb_frags = 0;
1728                 }
1729                 if (curr_skb != head_skb) {
1730                         head_skb->data_len += len;
1731                         head_skb->len += len;
1732                         head_skb->truesize += truesize;
1733                 }
1734                 offset = buf - page_address(page);
1735                 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
1736                         put_page(page);
1737                         skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
1738                                              len, truesize);
1739                 } else {
1740                         skb_add_rx_frag(curr_skb, num_skb_frags, page,
1741                                         offset, len, truesize);
1742                 }
1743         }
1744
1745         ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
1746         return head_skb;
1747
1748 err_skb:
1749         put_page(page);
1750         mergeable_buf_free(rq, num_buf, dev, stats);
1751
1752 err_buf:
1753         u64_stats_inc(&stats->drops);
1754         dev_kfree_skb(head_skb);
1755         return NULL;
1756 }
1757
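/* Map the device's hash_report to a PKT_HASH_TYPE_* level and record
 * the reported hash value in the skb.
 */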
1758 static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
1759                                 struct sk_buff *skb)
1760 {
1761         enum pkt_hash_types rss_hash_type;
1762
1763         if (!hdr_hash || !skb)
1764                 return;
1765
1766         switch (__le16_to_cpu(hdr_hash->hash_report)) {
1767         case VIRTIO_NET_HASH_REPORT_TCPv4:
1768         case VIRTIO_NET_HASH_REPORT_UDPv4:
1769         case VIRTIO_NET_HASH_REPORT_TCPv6:
1770         case VIRTIO_NET_HASH_REPORT_UDPv6:
1771         case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
1772         case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
1773                 rss_hash_type = PKT_HASH_TYPE_L4;
1774                 break;
1775         case VIRTIO_NET_HASH_REPORT_IPv4:
1776         case VIRTIO_NET_HASH_REPORT_IPv6:
1777         case VIRTIO_NET_HASH_REPORT_IPv6_EX:
1778                 rss_hash_type = PKT_HASH_TYPE_L3;
1779                 break;
1780         case VIRTIO_NET_HASH_REPORT_NONE:
1781         default:
1782                 rss_hash_type = PKT_HASH_TYPE_NONE;
1783         }
1784         skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
1785 }
1786
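/* Process one completed rx buffer: dispatch to the mergeable, big or
 * small receive path, fill in hash, checksum and GSO metadata from the
 * virtio header, then hand the skb to GRO.
 */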
1787 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
1788                         void *buf, unsigned int len, void **ctx,
1789                         unsigned int *xdp_xmit,
1790                         struct virtnet_rq_stats *stats)
1791 {
1792         struct net_device *dev = vi->dev;
1793         struct sk_buff *skb;
1794         struct virtio_net_common_hdr *hdr;
1795
1796         if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
1797                 pr_debug("%s: short packet %i\n", dev->name, len);
1798                 DEV_STATS_INC(dev, rx_length_errors);
1799                 virtnet_rq_free_buf(vi, rq, buf);
1800                 return;
1801         }
1802
1803         if (vi->mergeable_rx_bufs)
1804                 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
1805                                         stats);
1806         else if (vi->big_packets)
1807                 skb = receive_big(dev, vi, rq, buf, len, stats);
1808         else
1809                 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
1810
1811         if (unlikely(!skb))
1812                 return;
1813
1814         hdr = skb_vnet_common_hdr(skb);
1815         if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
1816                 virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
1817
1818         if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
1819                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1820
1821         if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
1822                                   virtio_is_little_endian(vi->vdev))) {
1823                 net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
1824                                      dev->name, hdr->hdr.gso_type,
1825                                      hdr->hdr.gso_size);
1826                 goto frame_err;
1827         }
1828
1829         skb_record_rx_queue(skb, vq2rxq(rq->vq));
1830         skb->protocol = eth_type_trans(skb, dev);
1831         pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
1832                  ntohs(skb->protocol), skb->len, skb->pkt_type);
1833
1834         napi_gro_receive(&rq->napi, skb);
1835         return;
1836
1837 frame_err:
1838         DEV_STATS_INC(dev, rx_frame_errors);
1839         dev_kfree_skb(skb);
1840 }
1841
1842 /* Unlike mergeable buffers, all buffers here are allocated with the
1843  * same size, except for the headroom. For this reason we do
1844  * not need to use mergeable_len_to_ctx here - it is enough
1845  * to store the headroom as the context, ignoring the truesize.
1846  */
1847 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
1848                              gfp_t gfp)
1849 {
1850         char *buf;
1851         unsigned int xdp_headroom = virtnet_get_headroom(vi);
1852         void *ctx = (void *)(unsigned long)xdp_headroom;
1853         int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
1854         int err;
1855
1856         len = SKB_DATA_ALIGN(len) +
1857               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1858
1859         buf = virtnet_rq_alloc(rq, len, gfp);
1860         if (unlikely(!buf))
1861                 return -ENOMEM;
1862
1863         virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
1864                                vi->hdr_len + GOOD_PACKET_LEN);
1865
1866         err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1867         if (err < 0) {
1868                 if (rq->do_dma)
1869                         virtnet_rq_unmap(rq, buf, 0);
1870                 put_page(virt_to_head_page(buf));
1871         }
1872
1873         return err;
1874 }
1875
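/* Post one "big packet" receive buffer: a chain of
 * big_packets_num_skbfrags + 1 pages, with rq->sg[0] covering the
 * virtio header and rq->sg[1] the remainder of the first page.
 */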
1876 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
1877                            gfp_t gfp)
1878 {
1879         struct page *first, *list = NULL;
1880         char *p;
1881         int i, err, offset;
1882
1883         sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
1884
1885         /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
1886         for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
1887                 first = get_a_page(rq, gfp);
1888                 if (!first) {
1889                         if (list)
1890                                 give_pages(rq, list);
1891                         return -ENOMEM;
1892                 }
1893                 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
1894
1895                 /* chain new page in list head to match sg */
1896                 first->private = (unsigned long)list;
1897                 list = first;
1898         }
1899
1900         first = get_a_page(rq, gfp);
1901         if (!first) {
1902                 give_pages(rq, list);
1903                 return -ENOMEM;
1904         }
1905         p = page_address(first);
1906
1907         /* rq->sg[0], rq->sg[1] share the same page */
1908         /* a separate rq->sg[0] for the header - required in case of !any_header_sg */
1909         sg_set_buf(&rq->sg[0], p, vi->hdr_len);
1910
1911         /* rq->sg[1] for data packet, from offset */
1912         offset = sizeof(struct padded_vnet_hdr);
1913         sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
1914
1915         /* chain first in list head */
1916         first->private = (unsigned long)list;
1917         err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
1918                                   first, gfp);
1919         if (err < 0)
1920                 give_pages(rq, first);
1921
1922         return err;
1923 }
1924
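/* Length for the next mergeable rx buffer: the whole page minus the
 * XDP headroom/tailroom reservation when XDP is active, otherwise the
 * EWMA of recent packet sizes clamped to [min_buf_len, PAGE_SIZE -
 * hdr_len] and rounded up to a cache line.
 */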
1925 static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
1926                                           struct ewma_pkt_len *avg_pkt_len,
1927                                           unsigned int room)
1928 {
1929         struct virtnet_info *vi = rq->vq->vdev->priv;
1930         const size_t hdr_len = vi->hdr_len;
1931         unsigned int len;
1932
1933         if (room)
1934                 return PAGE_SIZE - room;
1935
1936         len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
1937                                 rq->min_buf_len, PAGE_SIZE - hdr_len);
1938
1939         return ALIGN(len, L1_CACHE_BYTES);
1940 }
1941
1942 static int add_recvbuf_mergeable(struct virtnet_info *vi,
1943                                  struct receive_queue *rq, gfp_t gfp)
1944 {
1945         struct page_frag *alloc_frag = &rq->alloc_frag;
1946         unsigned int headroom = virtnet_get_headroom(vi);
1947         unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1948         unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1949         unsigned int len, hole;
1950         void *ctx;
1951         char *buf;
1952         int err;
1953
1954         /* Extra tailroom is needed to satisfy XDP's assumption. This
1955          * means rx frag coalescing won't work, but considering we've
1956          * disabled GSO for XDP, it won't be a big issue.
1957          */
1958         len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
1959
1960         buf = virtnet_rq_alloc(rq, len + room, gfp);
1961         if (unlikely(!buf))
1962                 return -ENOMEM;
1963
1964         buf += headroom; /* advance address leaving hole at front of pkt */
1965         hole = alloc_frag->size - alloc_frag->offset;
1966         if (hole < len + room) {
1967                 /* To avoid internal fragmentation, if there is very likely not
1968                  * enough space for another buffer, add the remaining space to
1969                  * the current buffer.
1970                  * The XDP core assumes that the frame_size of the xdp_buff and
1971                  * the frag length are PAGE_SIZE, so we disable the hole mechanism.
1972                  */
1973                 if (!headroom)
1974                         len += hole;
1975                 alloc_frag->offset += hole;
1976         }
1977
1978         virtnet_rq_init_one_sg(rq, buf, len);
1979
1980         ctx = mergeable_len_to_ctx(len + room, headroom);
1981         err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1982         if (err < 0) {
1983                 if (rq->do_dma)
1984                         virtnet_rq_unmap(rq, buf, 0);
1985                 put_page(virt_to_head_page(buf));
1986         }
1987
1988         return err;
1989 }
1990
1991 /*
1992  * Returns false if we couldn't fill entirely (OOM).
1993  *
1994  * Normally run in the receive path, but can also be run from ndo_open
1995  * before we're receiving packets, or from refill_work which is
1996  * careful to disable receiving (using napi_disable).
1997  */
1998 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
1999                           gfp_t gfp)
2000 {
2001         int err;
2002         bool oom;
2003
2004         do {
2005                 if (vi->mergeable_rx_bufs)
2006                         err = add_recvbuf_mergeable(vi, rq, gfp);
2007                 else if (vi->big_packets)
2008                         err = add_recvbuf_big(vi, rq, gfp);
2009                 else
2010                         err = add_recvbuf_small(vi, rq, gfp);
2011
2012                 oom = err == -ENOMEM;
2013                 if (err)
2014                         break;
2015         } while (rq->vq->num_free);
2016         if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
2017                 unsigned long flags;
2018
2019                 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
2020                 u64_stats_inc(&rq->stats.kicks);
2021                 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
2022         }
2023
2024         return !oom;
2025 }
2026
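/* Rx virtqueue interrupt callback: count the call for dim sampling and
 * schedule napi.
 */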
2027 static void skb_recv_done(struct virtqueue *rvq)
2028 {
2029         struct virtnet_info *vi = rvq->vdev->priv;
2030         struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
2031
2032         rq->calls++;
2033         virtqueue_napi_schedule(&rq->napi, rvq);
2034 }
2035
2036 static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
2037 {
2038         napi_enable(napi);
2039
2040         /* If all buffers were filled by the other side before napi was
2041          * enabled, we won't get another interrupt: process any outstanding
2042          * packets now. Calling local_bh_enable afterwards triggers softIRQ
2043          * processing.
2044          */
2044         local_bh_disable();
2045         virtqueue_napi_schedule(napi, vq);
2046         local_bh_enable();
2047 }
2048
2049 static void virtnet_napi_tx_enable(struct virtnet_info *vi,
2050                                    struct virtqueue *vq,
2051                                    struct napi_struct *napi)
2052 {
2053         if (!napi->weight)
2054                 return;
2055
2056         /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
2057          * enable the feature if this is likely affine with the transmit path.
2058          */
2059         if (!vi->affinity_hint_set) {
2060                 napi->weight = 0;
2061                 return;
2062         }
2063
2064         return virtnet_napi_enable(vq, napi);
2065 }
2066
2067 static void virtnet_napi_tx_disable(struct napi_struct *napi)
2068 {
2069         if (napi->weight)
2070                 napi_disable(napi);
2071 }
2072
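/* Deferred refill: with napi disabled, retry filling each rx queue and
 * reschedule ourselves if any queue still could not be filled (OOM).
 */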
2073 static void refill_work(struct work_struct *work)
2074 {
2075         struct virtnet_info *vi =
2076                 container_of(work, struct virtnet_info, refill.work);
2077         bool still_empty;
2078         int i;
2079
2080         for (i = 0; i < vi->curr_queue_pairs; i++) {
2081                 struct receive_queue *rq = &vi->rq[i];
2082
2083                 napi_disable(&rq->napi);
2084                 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
2085                 virtnet_napi_enable(rq->vq, &rq->napi);
2086
2087                 /* In theory, this can happen: if we don't get any buffers in,
2088                  * we will *never* try to fill again.
2089                  */
2090                 if (still_empty)
2091                         schedule_delayed_work(&vi->refill, HZ/2);
2092         }
2093 }
2094
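/* Receive up to @budget packets from one rx queue, trigger a refill
 * when the ring runs low, and fold the local counters into the queue
 * statistics.
 */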
2095 static int virtnet_receive(struct receive_queue *rq, int budget,
2096                            unsigned int *xdp_xmit)
2097 {
2098         struct virtnet_info *vi = rq->vq->vdev->priv;
2099         struct virtnet_rq_stats stats = {};
2100         unsigned int len;
2101         int packets = 0;
2102         void *buf;
2103         int i;
2104
2105         if (!vi->big_packets || vi->mergeable_rx_bufs) {
2106                 void *ctx;
2107
2108                 while (packets < budget &&
2109                        (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
2110                         receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
2111                         packets++;
2112                 }
2113         } else {
2114                 while (packets < budget &&
2115                        (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
2116                         receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
2117                         packets++;
2118                 }
2119         }
2120
2121         if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
2122                 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
2123                         spin_lock(&vi->refill_lock);
2124                         if (vi->refill_enabled)
2125                                 schedule_delayed_work(&vi->refill, 0);
2126                         spin_unlock(&vi->refill_lock);
2127                 }
2128         }
2129
2130         u64_stats_set(&stats.packets, packets);
2131         u64_stats_update_begin(&rq->stats.syncp);
2132         for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
2133                 size_t offset = virtnet_rq_stats_desc[i].offset;
2134                 u64_stats_t *item, *src;
2135
2136                 item = (u64_stats_t *)((u8 *)&rq->stats + offset);
2137                 src = (u64_stats_t *)((u8 *)&stats + offset);
2138                 u64_stats_add(item, u64_stats_read(src));
2139         }
2140         u64_stats_update_end(&rq->stats.syncp);
2141
2142         return packets;
2143 }
2144
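/* From rx napi, opportunistically reclaim completed tx buffers of the
 * paired send queue and wake its netdev queue if space freed up.
 */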
2145 static void virtnet_poll_cleantx(struct receive_queue *rq)
2146 {
2147         struct virtnet_info *vi = rq->vq->vdev->priv;
2148         unsigned int index = vq2rxq(rq->vq);
2149         struct send_queue *sq = &vi->sq[index];
2150         struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
2151
2152         if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
2153                 return;
2154
2155         if (__netif_tx_trylock(txq)) {
2156                 if (sq->reset) {
2157                         __netif_tx_unlock(txq);
2158                         return;
2159                 }
2160
2161                 do {
2162                         virtqueue_disable_cb(sq->vq);
2163                         free_old_xmit_skbs(sq, true);
2164                 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2165
2166                 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
2167                         netif_tx_wake_queue(txq);
2168
2169                 __netif_tx_unlock(txq);
2170         }
2171 }
2172
2173 static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq)
2174 {
2175         struct dim_sample cur_sample = {};
2176
2177         if (!rq->packets_in_napi)
2178                 return;
2179
2180         u64_stats_update_begin(&rq->stats.syncp);
2181         dim_update_sample(rq->calls,
2182                           u64_stats_read(&rq->stats.packets),
2183                           u64_stats_read(&rq->stats.bytes),
2184                           &cur_sample);
2185         u64_stats_update_end(&rq->stats.syncp);
2186
2187         net_dim(&rq->dim, cur_sample);
2188         rq->packets_in_napi = 0;
2189 }
2190
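/* Rx napi handler: clean the paired tx queue, receive packets, flush
 * XDP redirects, kick the XDP tx queue if needed and, when the budget
 * was not exhausted, complete napi and update dim.
 */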
2191 static int virtnet_poll(struct napi_struct *napi, int budget)
2192 {
2193         struct receive_queue *rq =
2194                 container_of(napi, struct receive_queue, napi);
2195         struct virtnet_info *vi = rq->vq->vdev->priv;
2196         struct send_queue *sq;
2197         unsigned int received;
2198         unsigned int xdp_xmit = 0;
2199         bool napi_complete;
2200
2201         virtnet_poll_cleantx(rq);
2202
2203         received = virtnet_receive(rq, budget, &xdp_xmit);
2204         rq->packets_in_napi += received;
2205
2206         if (xdp_xmit & VIRTIO_XDP_REDIR)
2207                 xdp_do_flush();
2208
2209         /* Out of packets? */
2210         if (received < budget) {
2211                 napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
2212                 if (napi_complete && rq->dim_enabled)
2213                         virtnet_rx_dim_update(vi, rq);
2214         }
2215
2216         if (xdp_xmit & VIRTIO_XDP_TX) {
2217                 sq = virtnet_xdp_get_sq(vi);
2218                 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2219                         u64_stats_update_begin(&sq->stats.syncp);
2220                         u64_stats_inc(&sq->stats.kicks);
2221                         u64_stats_update_end(&sq->stats.syncp);
2222                 }
2223                 virtnet_xdp_put_sq(vi, sq);
2224         }
2225
2226         return received;
2227 }
2228
2229 static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
2230 {
2231         virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
2232         napi_disable(&vi->rq[qp_index].napi);
2233         xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
2234 }
2235
2236 static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
2237 {
2238         struct net_device *dev = vi->dev;
2239         int err;
2240
2241         err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
2242                                vi->rq[qp_index].napi.napi_id);
2243         if (err < 0)
2244                 return err;
2245
2246         err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
2247                                          MEM_TYPE_PAGE_SHARED, NULL);
2248         if (err < 0)
2249                 goto err_xdp_reg_mem_model;
2250
2251         virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
2252         virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
2253
2254         return 0;
2255
2256 err_xdp_reg_mem_model:
2257         xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
2258         return err;
2259 }
2260
2261 static int virtnet_open(struct net_device *dev)
2262 {
2263         struct virtnet_info *vi = netdev_priv(dev);
2264         int i, err;
2265
2266         enable_delayed_refill(vi);
2267
2268         for (i = 0; i < vi->max_queue_pairs; i++) {
2269                 if (i < vi->curr_queue_pairs)
2270                         /* Make sure we have some buffers: if OOM, use the workqueue. */
2271                         if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2272                                 schedule_delayed_work(&vi->refill, 0);
2273
2274                 err = virtnet_enable_queue_pair(vi, i);
2275                 if (err < 0)
2276                         goto err_enable_qp;
2277         }
2278
2279         return 0;
2280
2281 err_enable_qp:
2282         disable_delayed_refill(vi);
2283         cancel_delayed_work_sync(&vi->refill);
2284
2285         for (i--; i >= 0; i--) {
2286                 virtnet_disable_queue_pair(vi, i);
2287                 cancel_work_sync(&vi->rq[i].dim.work);
2288         }
2289
2290         return err;
2291 }
2292
2293 static int virtnet_poll_tx(struct napi_struct *napi, int budget)
2294 {
2295         struct send_queue *sq = container_of(napi, struct send_queue, napi);
2296         struct virtnet_info *vi = sq->vq->vdev->priv;
2297         unsigned int index = vq2txq(sq->vq);
2298         struct netdev_queue *txq;
2299         int opaque;
2300         bool done;
2301
2302         if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
2303                 /* We don't need to enable cb for XDP */
2304                 napi_complete_done(napi, 0);
2305                 return 0;
2306         }
2307
2308         txq = netdev_get_tx_queue(vi->dev, index);
2309         __netif_tx_lock(txq, raw_smp_processor_id());
2310         virtqueue_disable_cb(sq->vq);
2311         free_old_xmit_skbs(sq, true);
2312
2313         if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
2314                 netif_tx_wake_queue(txq);
2315
2316         opaque = virtqueue_enable_cb_prepare(sq->vq);
2317
2318         done = napi_complete_done(napi, 0);
2319
2320         if (!done)
2321                 virtqueue_disable_cb(sq->vq);
2322
2323         __netif_tx_unlock(txq);
2324
2325         if (done) {
2326                 if (unlikely(virtqueue_poll(sq->vq, opaque))) {
2327                         if (napi_schedule_prep(napi)) {
2328                                 __netif_tx_lock(txq, raw_smp_processor_id());
2329                                 virtqueue_disable_cb(sq->vq);
2330                                 __netif_tx_unlock(txq);
2331                                 __napi_schedule(napi);
2332                         }
2333                 }
2334         }
2335
2336         return 0;
2337 }
2338
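/* Build the virtio header for @skb and add both to the send virtqueue,
 * pushing the header into the skb's headroom when the device lets
 * header and data share one sg entry.
 */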
2339 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
2340 {
2341         struct virtio_net_hdr_mrg_rxbuf *hdr;
2342         const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
2343         struct virtnet_info *vi = sq->vq->vdev->priv;
2344         int num_sg;
2345         unsigned hdr_len = vi->hdr_len;
2346         bool can_push;
2347
2348         pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
2349
2350         can_push = vi->any_header_sg &&
2351                 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
2352                 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
2353         /* Even if we can, don't push here yet as this would skew
2354          * csum_start offset below. */
2355         if (can_push)
2356                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
2357         else
2358                 hdr = &skb_vnet_common_hdr(skb)->mrg_hdr;
2359
2360         if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
2361                                     virtio_is_little_endian(vi->vdev), false,
2362                                     0))
2363                 return -EPROTO;
2364
2365         if (vi->mergeable_rx_bufs)
2366                 hdr->num_buffers = 0;
2367
2368         sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
2369         if (can_push) {
2370                 __skb_push(skb, hdr_len);
2371                 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
2372                 if (unlikely(num_sg < 0))
2373                         return num_sg;
2374                 /* Pull header back to avoid skew in tx bytes calculations. */
2375                 __skb_pull(skb, hdr_len);
2376         } else {
2377                 sg_set_buf(sq->sg, hdr, hdr_len);
2378                 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
2379                 if (unlikely(num_sg < 0))
2380                         return num_sg;
2381                 num_sg++;
2382         }
2383         return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
2384 }
2385
2386 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
2387 {
2388         struct virtnet_info *vi = netdev_priv(dev);
2389         int qnum = skb_get_queue_mapping(skb);
2390         struct send_queue *sq = &vi->sq[qnum];
2391         int err;
2392         struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
2393         bool kick = !netdev_xmit_more();
2394         bool use_napi = sq->napi.weight;
2395
2396         /* Free up any pending old buffers before queueing new ones. */
2397         do {
2398                 if (use_napi)
2399                         virtqueue_disable_cb(sq->vq);
2400
2401                 free_old_xmit_skbs(sq, false);
2402
2403         } while (use_napi && kick &&
2404                unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2405
2406         /* timestamp packet in software */
2407         skb_tx_timestamp(skb);
2408
2409         /* Try to transmit */
2410         err = xmit_skb(sq, skb);
2411
2412         /* This should not happen! */
2413         if (unlikely(err)) {
2414                 DEV_STATS_INC(dev, tx_fifo_errors);
2415                 if (net_ratelimit())
2416                         dev_warn(&dev->dev,
2417                                  "Unexpected TXQ (%d) queue failure: %d\n",
2418                                  qnum, err);
2419                 DEV_STATS_INC(dev, tx_dropped);
2420                 dev_kfree_skb_any(skb);
2421                 return NETDEV_TX_OK;
2422         }
2423
2424         /* Don't wait up for transmitted skbs to be freed. */
2425         if (!use_napi) {
2426                 skb_orphan(skb);
2427                 nf_reset_ct(skb);
2428         }
2429
2430         check_sq_full_and_disable(vi, dev, sq);
2431
2432         if (kick || netif_xmit_stopped(txq)) {
2433                 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2434                         u64_stats_update_begin(&sq->stats.syncp);
2435                         u64_stats_inc(&sq->stats.kicks);
2436                         u64_stats_update_end(&sq->stats.syncp);
2437                 }
2438         }
2439
2440         return NETDEV_TX_OK;
2441 }
2442
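/* Resize an rx virtqueue: quiesce napi and dim, resize the ring, then
 * refill it and re-enable napi.
 */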
2443 static int virtnet_rx_resize(struct virtnet_info *vi,
2444                              struct receive_queue *rq, u32 ring_num)
2445 {
2446         bool running = netif_running(vi->dev);
2447         int err, qindex;
2448
2449         qindex = rq - vi->rq;
2450
2451         if (running) {
2452                 napi_disable(&rq->napi);
2453                 cancel_work_sync(&rq->dim.work);
2454         }
2455
2456         err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
2457         if (err)
2458                 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
2459
2460         if (!try_fill_recv(vi, rq, GFP_KERNEL))
2461                 schedule_delayed_work(&vi->refill, 0);
2462
2463         if (running)
2464                 virtnet_napi_enable(rq->vq, &rq->napi);
2465         return err;
2466 }
2467
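/* Resize a tx virtqueue: stop the subqueue and mark it as being reset
 * so the rx-side tx cleaning leaves it alone, resize the ring, then
 * wake the queue back up.
 */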
2468 static int virtnet_tx_resize(struct virtnet_info *vi,
2469                              struct send_queue *sq, u32 ring_num)
2470 {
2471         bool running = netif_running(vi->dev);
2472         struct netdev_queue *txq;
2473         int err, qindex;
2474
2475         qindex = sq - vi->sq;
2476
2477         if (running)
2478                 virtnet_napi_tx_disable(&sq->napi);
2479
2480         txq = netdev_get_tx_queue(vi->dev, qindex);
2481
2482         /* 1. wait for all in-flight xmit to complete
2483          * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
2484          */
2485         __netif_tx_lock_bh(txq);
2486
2487         /* Prevent rx poll from accessing sq. */
2488         sq->reset = true;
2489
2490         /* Prevent the upper layer from trying to send packets. */
2491         netif_stop_subqueue(vi->dev, qindex);
2492
2493         __netif_tx_unlock_bh(txq);
2494
2495         err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
2496         if (err)
2497                 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
2498
2499         __netif_tx_lock_bh(txq);
2500         sq->reset = false;
2501         netif_tx_wake_queue(txq);
2502         __netif_tx_unlock_bh(txq);
2503
2504         if (running)
2505                 virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
2506         return err;
2507 }
2508
2509 /*
2510  * Send command via the control virtqueue and check status.  Commands
2511  * supported by the hypervisor, as indicated by feature bits, should
2512  * never fail unless improperly formatted.
2513  */
2514 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
2515                                  struct scatterlist *out)
2516 {
2517         struct scatterlist *sgs[4], hdr, stat;
2518         unsigned out_num = 0, tmp;
2519         int ret;
2520
2521         /* Caller should know better */
2522         BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
2523
2524         vi->ctrl->status = ~0;
2525         vi->ctrl->hdr.class = class;
2526         vi->ctrl->hdr.cmd = cmd;
2527         /* Add header */
2528         sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
2529         sgs[out_num++] = &hdr;
2530
2531         if (out)
2532                 sgs[out_num++] = out;
2533
2534         /* Add return status. */
2535         sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
2536         sgs[out_num] = &stat;
2537
2538         BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
2539         ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
2540         if (ret < 0) {
2541                 dev_warn(&vi->vdev->dev,
2542                          "Failed to add sgs for command vq: %d.\n", ret);
2543                 return false;
2544         }
2545
2546         if (unlikely(!virtqueue_kick(vi->cvq)))
2547                 return vi->ctrl->status == VIRTIO_NET_OK;
2548
2549         /* Spin for a response; the kick causes an ioport write, trapping
2550          * into the hypervisor, so the request should be handled immediately.
2551          */
2552         while (!virtqueue_get_buf(vi->cvq, &tmp) &&
2553                !virtqueue_is_broken(vi->cvq))
2554                 cpu_relax();
2555
2556         return vi->ctrl->status == VIRTIO_NET_OK;
2557 }
2558
2559 static int virtnet_set_mac_address(struct net_device *dev, void *p)
2560 {
2561         struct virtnet_info *vi = netdev_priv(dev);
2562         struct virtio_device *vdev = vi->vdev;
2563         int ret;
2564         struct sockaddr *addr;
2565         struct scatterlist sg;
2566
2567         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
2568                 return -EOPNOTSUPP;
2569
2570         addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
2571         if (!addr)
2572                 return -ENOMEM;
2573
2574         ret = eth_prepare_mac_addr_change(dev, addr);
2575         if (ret)
2576                 goto out;
2577
2578         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
2579                 sg_init_one(&sg, addr->sa_data, dev->addr_len);
2580                 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2581                                           VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
2582                         dev_warn(&vdev->dev,
2583                                  "Failed to set mac address by vq command.\n");
2584                         ret = -EINVAL;
2585                         goto out;
2586                 }
2587         } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
2588                    !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2589                 unsigned int i;
2590
2591                 /* Naturally, this has an atomicity problem. */
2592                 for (i = 0; i < dev->addr_len; i++)
2593                         virtio_cwrite8(vdev,
2594                                        offsetof(struct virtio_net_config, mac) +
2595                                        i, addr->sa_data[i]);
2596         }
2597
2598         eth_commit_mac_addr_change(dev, p);
2599         ret = 0;
2600
2601 out:
2602         kfree(addr);
2603         return ret;
2604 }
2605
2606 static void virtnet_stats(struct net_device *dev,
2607                           struct rtnl_link_stats64 *tot)
2608 {
2609         struct virtnet_info *vi = netdev_priv(dev);
2610         unsigned int start;
2611         int i;
2612
2613         for (i = 0; i < vi->max_queue_pairs; i++) {
2614                 u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
2615                 struct receive_queue *rq = &vi->rq[i];
2616                 struct send_queue *sq = &vi->sq[i];
2617
2618                 do {
2619                         start = u64_stats_fetch_begin(&sq->stats.syncp);
2620                         tpackets = u64_stats_read(&sq->stats.packets);
2621                         tbytes   = u64_stats_read(&sq->stats.bytes);
2622                         terrors  = u64_stats_read(&sq->stats.tx_timeouts);
2623                 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
2624
2625                 do {
2626                         start = u64_stats_fetch_begin(&rq->stats.syncp);
2627                         rpackets = u64_stats_read(&rq->stats.packets);
2628                         rbytes   = u64_stats_read(&rq->stats.bytes);
2629                         rdrops   = u64_stats_read(&rq->stats.drops);
2630                 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
2631
2632                 tot->rx_packets += rpackets;
2633                 tot->tx_packets += tpackets;
2634                 tot->rx_bytes   += rbytes;
2635                 tot->tx_bytes   += tbytes;
2636                 tot->rx_dropped += rdrops;
2637                 tot->tx_errors  += terrors;
2638         }
2639
2640         tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
2641         tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors);
2642         tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors);
2643         tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors);
2644 }
2645
2646 static void virtnet_ack_link_announce(struct virtnet_info *vi)
2647 {
2648         rtnl_lock();
2649         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
2650                                   VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
2651                 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
2652         rtnl_unlock();
2653 }
2654
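/* Ask the device to use @queue_pairs queue pairs via the
 * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET control command.
 */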
2655 static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2656 {
2657         struct scatterlist sg;
2658         struct net_device *dev = vi->dev;
2659
2660         if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
2661                 return 0;
2662
2663         vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
2664         sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
2665
2666         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
2667                                   VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
2668                 dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
2669                          queue_pairs);
2670                 return -EINVAL;
2671         } else {
2672                 vi->curr_queue_pairs = queue_pairs;
2673                 /* virtnet_open() will refill when the device goes up. */
2674                 if (dev->flags & IFF_UP)
2675                         schedule_delayed_work(&vi->refill, 0);
2676         }
2677
2678         return 0;
2679 }
2680
2681 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2682 {
2683         int err;
2684
2685         rtnl_lock();
2686         err = _virtnet_set_queues(vi, queue_pairs);
2687         rtnl_unlock();
2688         return err;
2689 }
2690
2691 static int virtnet_close(struct net_device *dev)
2692 {
2693         struct virtnet_info *vi = netdev_priv(dev);
2694         int i;
2695
2696         /* Make sure NAPI doesn't schedule refill work */
2697         disable_delayed_refill(vi);
2698         /* Make sure refill_work doesn't re-enable napi! */
2699         cancel_delayed_work_sync(&vi->refill);
2700
2701         for (i = 0; i < vi->max_queue_pairs; i++) {
2702                 virtnet_disable_queue_pair(vi, i);
2703                 cancel_work_sync(&vi->rq[i].dim.work);
2704         }
2705
2706         return 0;
2707 }
2708
2709 static void virtnet_set_rx_mode(struct net_device *dev)
2710 {
2711         struct virtnet_info *vi = netdev_priv(dev);
2712         struct scatterlist sg[2];
2713         struct virtio_net_ctrl_mac *mac_data;
2714         struct netdev_hw_addr *ha;
2715         int uc_count;
2716         int mc_count;
2717         void *buf;
2718         int i;
2719
2720         /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
2721         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
2722                 return;
2723
2724         vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
2725         vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
2726
2727         sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
2728
2729         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2730                                   VIRTIO_NET_CTRL_RX_PROMISC, sg))
2731                 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
2732                          vi->ctrl->promisc ? "en" : "dis");
2733
2734         sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
2735
2736         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2737                                   VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
2738                 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
2739                          vi->ctrl->allmulti ? "en" : "dis");
2740
2741         uc_count = netdev_uc_count(dev);
2742         mc_count = netdev_mc_count(dev);
2743         /* MAC filter - use one buffer for both lists */
2744         buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
2745                       (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
2746         mac_data = buf;
2747         if (!buf)
2748                 return;
2749
2750         sg_init_table(sg, 2);
2751
2752         /* Store the unicast list and count in the front of the buffer */
2753         mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
2754         i = 0;
2755         netdev_for_each_uc_addr(ha, dev)
2756                 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2757
2758         sg_set_buf(&sg[0], mac_data,
2759                    sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
2760
2761         /* multicast list and count fill the end */
2762         mac_data = (void *)&mac_data->macs[uc_count][0];
2763
2764         mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
2765         i = 0;
2766         netdev_for_each_mc_addr(ha, dev)
2767                 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2768
2769         sg_set_buf(&sg[1], mac_data,
2770                    sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
2771
2772         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2773                                   VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
2774                 dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
2775
2776         kfree(buf);
2777 }
2778
2779 static int virtnet_vlan_rx_add_vid(struct net_device *dev,
2780                                    __be16 proto, u16 vid)
2781 {
2782         struct virtnet_info *vi = netdev_priv(dev);
2783         struct scatterlist sg;
2784
2785         vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2786         sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2787
2788         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2789                                   VIRTIO_NET_CTRL_VLAN_ADD, &sg))
2790                 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
2791         return 0;
2792 }
2793
2794 static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
2795                                     __be16 proto, u16 vid)
2796 {
2797         struct virtnet_info *vi = netdev_priv(dev);
2798         struct scatterlist sg;
2799
2800         vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2801         sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2802
2803         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2804                                   VIRTIO_NET_CTRL_VLAN_DEL, &sg))
2805                 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
2806         return 0;
2807 }
2808
2809 static void virtnet_clean_affinity(struct virtnet_info *vi)
2810 {
2811         int i;
2812
2813         if (vi->affinity_hint_set) {
2814                 for (i = 0; i < vi->max_queue_pairs; i++) {
2815                         virtqueue_set_affinity(vi->rq[i].vq, NULL);
2816                         virtqueue_set_affinity(vi->sq[i].vq, NULL);
2817                 }
2818
2819                 vi->affinity_hint_set = false;
2820         }
2821 }
2822
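/* Spread the online CPUs over the queue pairs: give each pair a
 * roughly equal share of CPUs as its virtqueue affinity hint and XPS
 * mask.
 */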
2823 static void virtnet_set_affinity(struct virtnet_info *vi)
2824 {
2825         cpumask_var_t mask;
2826         int stragglers;
2827         int group_size;
2828         int i, j, cpu;
2829         int num_cpu;
2830         int stride;
2831
2832         if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2833                 virtnet_clean_affinity(vi);
2834                 return;
2835         }
2836
2837         num_cpu = num_online_cpus();
2838         stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
2839         stragglers = num_cpu >= vi->curr_queue_pairs ?
2840                         num_cpu % vi->curr_queue_pairs :
2841                         0;
2842         cpu = cpumask_first(cpu_online_mask);
2843
2844         for (i = 0; i < vi->curr_queue_pairs; i++) {
2845                 group_size = stride + (i < stragglers ? 1 : 0);
2846
2847                 for (j = 0; j < group_size; j++) {
2848                         cpumask_set_cpu(cpu, mask);
2849                         cpu = cpumask_next_wrap(cpu, cpu_online_mask,
2850                                                 nr_cpu_ids, false);
2851                 }
2852                 virtqueue_set_affinity(vi->rq[i].vq, mask);
2853                 virtqueue_set_affinity(vi->sq[i].vq, mask);
2854                 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
2855                 cpumask_clear(mask);
2856         }
2857
2858         vi->affinity_hint_set = true;
2859         free_cpumask_var(mask);
2860 }
2861
2862 static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
2863 {
2864         struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2865                                                    node);
2866         virtnet_set_affinity(vi);
2867         return 0;
2868 }
2869
2870 static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
2871 {
2872         struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2873                                                    node_dead);
2874         virtnet_set_affinity(vi);
2875         return 0;
2876 }
2877
2878 static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
2879 {
2880         struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2881                                                    node);
2882
2883         virtnet_clean_affinity(vi);
2884         return 0;
2885 }
2886
2887 static enum cpuhp_state virtionet_online;
2888
2889 static int virtnet_cpu_notif_add(struct virtnet_info *vi)
2890 {
2891         int ret;
2892
2893         ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
2894         if (ret)
2895                 return ret;
2896         ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2897                                                &vi->node_dead);
2898         if (!ret)
2899                 return ret;
2900         cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2901         return ret;
2902 }
2903
2904 static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
2905 {
2906         cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2907         cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2908                                             &vi->node_dead);
2909 }
2910
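/* Program interrupt coalescing for a single virtqueue via the
 * VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET control command.
 */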
2911 static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
2912                                          u16 vqn, u32 max_usecs, u32 max_packets)
2913 {
2914         struct scatterlist sgs;
2915
2916         vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
2917         vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
2918         vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
2919         sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
2920
2921         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
2922                                   VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
2923                                   &sgs))
2924                 return -EINVAL;
2925
2926         return 0;
2927 }
2928
2929 static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
2930                                             u16 queue, u32 max_usecs,
2931                                             u32 max_packets)
2932 {
2933         int err;
2934
2935         err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
2936                                             max_usecs, max_packets);
2937         if (err)
2938                 return err;
2939
2940         vi->rq[queue].intr_coal.max_usecs = max_usecs;
2941         vi->rq[queue].intr_coal.max_packets = max_packets;
2942
2943         return 0;
2944 }
2945
2946 static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
2947                                             u16 queue, u32 max_usecs,
2948                                             u32 max_packets)
2949 {
2950         int err;
2951
2952         err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
2953                                             max_usecs, max_packets);
2954         if (err)
2955                 return err;
2956
2957         vi->sq[queue].intr_coal.max_usecs = max_usecs;
2958         vi->sq[queue].intr_coal.max_packets = max_packets;
2959
2960         return 0;
2961 }
2962
2963 static void virtnet_get_ringparam(struct net_device *dev,
2964                                   struct ethtool_ringparam *ring,
2965                                   struct kernel_ethtool_ringparam *kernel_ring,
2966                                   struct netlink_ext_ack *extack)
2967 {
2968         struct virtnet_info *vi = netdev_priv(dev);
2969
2970         ring->rx_max_pending = vi->rq[0].vq->num_max;
2971         ring->tx_max_pending = vi->sq[0].vq->num_max;
2972         ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2973         ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2974 }
2975
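     /* ethtool -G handler (e.g., hypothetically, "ethtool -G eth0 rx 512 tx 512").
      * Resizing a virtqueue resets its coalescing state, so the saved global
      * coalescing parameters are re-applied after each resize.
      */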
2976 static int virtnet_set_ringparam(struct net_device *dev,
2977                                  struct ethtool_ringparam *ring,
2978                                  struct kernel_ethtool_ringparam *kernel_ring,
2979                                  struct netlink_ext_ack *extack)
2980 {
2981         struct virtnet_info *vi = netdev_priv(dev);
2982         u32 rx_pending, tx_pending;
2983         struct receive_queue *rq;
2984         struct send_queue *sq;
2985         int i, err;
2986
2987         if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2988                 return -EINVAL;
2989
2990         rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2991         tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2992
2993         if (ring->rx_pending == rx_pending &&
2994             ring->tx_pending == tx_pending)
2995                 return 0;
2996
2997         if (ring->rx_pending > vi->rq[0].vq->num_max)
2998                 return -EINVAL;
2999
3000         if (ring->tx_pending > vi->sq[0].vq->num_max)
3001                 return -EINVAL;
3002
3003         for (i = 0; i < vi->max_queue_pairs; i++) {
3004                 rq = vi->rq + i;
3005                 sq = vi->sq + i;
3006
3007                 if (ring->tx_pending != tx_pending) {
3008                         err = virtnet_tx_resize(vi, sq, ring->tx_pending);
3009                         if (err)
3010                                 return err;
3011
3012                         /* Upon disabling and re-enabling a transmit virtqueue, the device must
3013                          * set the coalescing parameters of the virtqueue to those configured
3014                          * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
3015                          * did not set any TX coalescing parameters, to 0.
3016                          */
3017                         err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i,
3018                                                                vi->intr_coal_tx.max_usecs,
3019                                                                vi->intr_coal_tx.max_packets);
3020                         if (err)
3021                                 return err;
3022                 }
3023
3024                 if (ring->rx_pending != rx_pending) {
3025                         err = virtnet_rx_resize(vi, rq, ring->rx_pending);
3026                         if (err)
3027                                 return err;
3028
3029                         /* The reason is the same as for the transmit virtqueue reset */
3030                         err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
3031                                                                vi->intr_coal_rx.max_usecs,
3032                                                                vi->intr_coal_rx.max_packets);
3033                         if (err)
3034                                 return err;
3035                 }
3036         }
3037
3038         return 0;
3039 }
3040
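     /* Push the current RSS/hash configuration to the device. The payload is
      * split into four sg entries: the header up to the indirection table, the
      * indirection table itself, the fields from max_tx_vq up to the key, and
      * finally the hash key.
      */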
3041 static bool virtnet_commit_rss_command(struct virtnet_info *vi)
3042 {
3043         struct net_device *dev = vi->dev;
3044         struct scatterlist sgs[4];
3045         unsigned int sg_buf_size;
3046
3047         /* prepare sgs */
3048         sg_init_table(sgs, 4);
3049
3050         sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
3051         sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
3052
3053         sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
3054         sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
3055
3056         sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
3057                         - offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
3058         sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
3059
3060         sg_buf_size = vi->rss_key_size;
3061         sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
3062
3063         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
3064                                   vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
3065                                   : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
3066                 dev_warn(&dev->dev, "Failed to commit RSS configuration\n");
3067                 return false;
3068         }
3069         return true;
3070 }
3071
3072 static void virtnet_init_default_rss(struct virtnet_info *vi)
3073 {
3074         u32 indir_val = 0;
3075         int i = 0;
3076
3077         vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
3078         vi->rss_hash_types_saved = vi->rss_hash_types_supported;
3079         vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
3080                                                 ? vi->rss_indir_table_size - 1 : 0;
3081         vi->ctrl->rss.unclassified_queue = 0;
3082
3083         for (; i < vi->rss_indir_table_size; ++i) {
3084                 indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
3085                 vi->ctrl->rss.indirection_table[i] = indir_val;
3086         }
3087
3088         vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
3089         vi->ctrl->rss.hash_key_length = vi->rss_key_size;
3090
3091         netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
3092 }
3093
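     /* Report, in ethtool RXH_* terms, which header fields feed the receive
      * hash for the given flow type.
      */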
3094 static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
3095 {
3096         info->data = 0;
3097         switch (info->flow_type) {
3098         case TCP_V4_FLOW:
3099                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
3100                         info->data = RXH_IP_SRC | RXH_IP_DST |
3101                                                  RXH_L4_B_0_1 | RXH_L4_B_2_3;
3102                 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
3103                         info->data = RXH_IP_SRC | RXH_IP_DST;
3104                 }
3105                 break;
3106         case TCP_V6_FLOW:
3107                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
3108                         info->data = RXH_IP_SRC | RXH_IP_DST |
3109                                                  RXH_L4_B_0_1 | RXH_L4_B_2_3;
3110                 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
3111                         info->data = RXH_IP_SRC | RXH_IP_DST;
3112                 }
3113                 break;
3114         case UDP_V4_FLOW:
3115                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
3116                         info->data = RXH_IP_SRC | RXH_IP_DST |
3117                                                  RXH_L4_B_0_1 | RXH_L4_B_2_3;
3118                 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
3119                         info->data = RXH_IP_SRC | RXH_IP_DST;
3120                 }
3121                 break;
3122         case UDP_V6_FLOW:
3123                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
3124                         info->data = RXH_IP_SRC | RXH_IP_DST |
3125                                                  RXH_L4_B_0_1 | RXH_L4_B_2_3;
3126                 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
3127                         info->data = RXH_IP_SRC | RXH_IP_DST;
3128                 }
3129                 break;
3130         case IPV4_FLOW:
3131                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
3132                         info->data = RXH_IP_SRC | RXH_IP_DST;
3133
3134                 break;
3135         case IPV6_FLOW:
3136                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
3137                         info->data = RXH_IP_SRC | RXH_IP_DST;
3138
3139                 break;
3140         default:
3141                 info->data = 0;
3142                 break;
3143         }
3144 }
3145
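     /* ethtool rx-flow-hash handler (e.g., hypothetically,
      * "ethtool -N eth0 rx-flow-hash tcp4 sdfn"): translate the requested RXH_*
      * fields into VIRTIO_NET_RSS_HASH_TYPE_* bits and commit them when RXHASH
      * is enabled. Returns false for combinations the device cannot honour.
      */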
3146 static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
3147 {
3148         u32 new_hashtypes = vi->rss_hash_types_saved;
3149         bool is_disable = info->data & RXH_DISCARD;
3150         bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
3151
3152         /* supports only 'sd' (IP src/dst), 'sdfn' (+ L4 ports) and 'r' (discard) */
3153         if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) || is_l4 || is_disable))
3154                 return false;
3155
3156         switch (info->flow_type) {
3157         case TCP_V4_FLOW:
3158                 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
3159                 if (!is_disable)
3160                         new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3161                                 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
3162                 break;
3163         case UDP_V4_FLOW:
3164                 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
3165                 if (!is_disable)
3166                         new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3167                                 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
3168                 break;
3169         case IPV4_FLOW:
3170                 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3171                 if (!is_disable)
3172                         new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3173                 break;
3174         case TCP_V6_FLOW:
3175                 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
3176                 if (!is_disable)
3177                         new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3178                                 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
3179                 break;
3180         case UDP_V6_FLOW:
3181                 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
3182                 if (!is_disable)
3183                         new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3184                                 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
3185                 break;
3186         case IPV6_FLOW:
3187                 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3188                 if (!is_disable)
3189                         new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3190                 break;
3191         default:
3192                 /* unsupported flow */
3193                 return false;
3194         }
3195
3196         /* reject if an unsupported hash type was requested */
3197         if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
3198                 return false;
3199
3200         if (new_hashtypes != vi->rss_hash_types_saved) {
3201                 vi->rss_hash_types_saved = new_hashtypes;
3202                 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
3203                 if (vi->dev->features & NETIF_F_RXHASH)
3204                         return virtnet_commit_rss_command(vi);
3205         }
3206
3207         return true;
3208 }
3209
3210 static void virtnet_get_drvinfo(struct net_device *dev,
3211                                 struct ethtool_drvinfo *info)
3212 {
3213         struct virtnet_info *vi = netdev_priv(dev);
3214         struct virtio_device *vdev = vi->vdev;
3215
3216         strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
3217         strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
3218         strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
3220 }
3221
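     /* ethtool -L handler (e.g., hypothetically, "ethtool -L eth0 combined 4").
      * Only combined channels are supported; rx/tx/other counts must be zero.
      */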
3222 /* TODO: Eliminate OOO packets during switching */
3223 static int virtnet_set_channels(struct net_device *dev,
3224                                 struct ethtool_channels *channels)
3225 {
3226         struct virtnet_info *vi = netdev_priv(dev);
3227         u16 queue_pairs = channels->combined_count;
3228         int err;
3229
3230         /* We don't support separate rx/tx channels.
3231          * We don't allow setting 'other' channels.
3232          */
3233         if (channels->rx_count || channels->tx_count || channels->other_count)
3234                 return -EINVAL;
3235
3236         if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
3237                 return -EINVAL;
3238
3239         /* For now we don't support modifying channels while XDP is loaded.
3240          * Also, when XDP is loaded all RX queues have XDP programs, so we
3241          * only need to check a single RX queue.
3242          */
3243         if (vi->rq[0].xdp_prog)
3244                 return -EINVAL;
3245
3246         cpus_read_lock();
3247         err = _virtnet_set_queues(vi, queue_pairs);
3248         if (err) {
3249                 cpus_read_unlock();
3250                 goto err;
3251         }
3252         virtnet_set_affinity(vi);
3253         cpus_read_unlock();
3254
3255         netif_set_real_num_tx_queues(dev, queue_pairs);
3256         netif_set_real_num_rx_queues(dev, queue_pairs);
3257  err:
3258         return err;
3259 }
3260
3261 static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3262 {
3263         struct virtnet_info *vi = netdev_priv(dev);
3264         unsigned int i, j;
3265         u8 *p = data;
3266
3267         switch (stringset) {
3268         case ETH_SS_STATS:
3269                 for (i = 0; i < vi->curr_queue_pairs; i++) {
3270                         for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
3271                                 ethtool_sprintf(&p, "rx_queue_%u_%s", i,
3272                                                 virtnet_rq_stats_desc[j].desc);
3273                 }
3274
3275                 for (i = 0; i < vi->curr_queue_pairs; i++) {
3276                         for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
3277                                 ethtool_sprintf(&p, "tx_queue_%u_%s", i,
3278                                                 virtnet_sq_stats_desc[j].desc);
3279                 }
3280                 break;
3281         }
3282 }
3283
3284 static int virtnet_get_sset_count(struct net_device *dev, int sset)
3285 {
3286         struct virtnet_info *vi = netdev_priv(dev);
3287
3288         switch (sset) {
3289         case ETH_SS_STATS:
3290                 return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
3291                                                VIRTNET_SQ_STATS_LEN);
3292         default:
3293                 return -EOPNOTSUPP;
3294         }
3295 }
3296
3297 static void virtnet_get_ethtool_stats(struct net_device *dev,
3298                                       struct ethtool_stats *stats, u64 *data)
3299 {
3300         struct virtnet_info *vi = netdev_priv(dev);
3301         unsigned int idx = 0, start, i, j;
3302         const u8 *stats_base;
3303         const u64_stats_t *p;
3304         size_t offset;
3305
3306         for (i = 0; i < vi->curr_queue_pairs; i++) {
3307                 struct receive_queue *rq = &vi->rq[i];
3308
3309                 stats_base = (const u8 *)&rq->stats;
3310                 do {
3311                         start = u64_stats_fetch_begin(&rq->stats.syncp);
3312                         for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
3313                                 offset = virtnet_rq_stats_desc[j].offset;
3314                                 p = (const u64_stats_t *)(stats_base + offset);
3315                                 data[idx + j] = u64_stats_read(p);
3316                         }
3317                 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
3318                 idx += VIRTNET_RQ_STATS_LEN;
3319         }
3320
3321         for (i = 0; i < vi->curr_queue_pairs; i++) {
3322                 struct send_queue *sq = &vi->sq[i];
3323
3324                 stats_base = (const u8 *)&sq->stats;
3325                 do {
3326                         start = u64_stats_fetch_begin(&sq->stats.syncp);
3327                         for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
3328                                 offset = virtnet_sq_stats_desc[j].offset;
3329                                 p = (const u64_stats_t *)(stats_base + offset);
3330                                 data[idx + j] = u64_stats_read(p);
3331                         }
3332                 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
3333                 idx += VIRTNET_SQ_STATS_LEN;
3334         }
3335 }
3336
3337 static void virtnet_get_channels(struct net_device *dev,
3338                                  struct ethtool_channels *channels)
3339 {
3340         struct virtnet_info *vi = netdev_priv(dev);
3341
3342         channels->combined_count = vi->curr_queue_pairs;
3343         channels->max_combined = vi->max_queue_pairs;
3344         channels->max_other = 0;
3345         channels->rx_count = 0;
3346         channels->tx_count = 0;
3347         channels->other_count = 0;
3348 }
3349
3350 static int virtnet_set_link_ksettings(struct net_device *dev,
3351                                       const struct ethtool_link_ksettings *cmd)
3352 {
3353         struct virtnet_info *vi = netdev_priv(dev);
3354
3355         return ethtool_virtdev_set_link_ksettings(dev, cmd,
3356                                                   &vi->speed, &vi->duplex);
3357 }
3358
3359 static int virtnet_get_link_ksettings(struct net_device *dev,
3360                                       struct ethtool_link_ksettings *cmd)
3361 {
3362         struct virtnet_info *vi = netdev_priv(dev);
3363
3364         cmd->base.speed = vi->speed;
3365         cmd->base.duplex = vi->duplex;
3366         cmd->base.port = PORT_OTHER;
3367
3368         return 0;
3369 }
3370
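     /* Set device-global TX coalescing via VIRTIO_NET_CTRL_NOTF_COAL_TX_SET
      * and mirror the accepted values into the per-queue bookkeeping.
      */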
3371 static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
3372                                           struct ethtool_coalesce *ec)
3373 {
3374         struct scatterlist sgs_tx;
3375         int i;
3376
3377         vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
3378         vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
3379         sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
3380
3381         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3382                                   VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
3383                                   &sgs_tx))
3384                 return -EINVAL;
3385
3386         vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
3387         vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
3388         for (i = 0; i < vi->max_queue_pairs; i++) {
3389                 vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
3390                 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
3391         }
3392
3393         return 0;
3394 }
3395
3396 static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
3397                                           struct ethtool_coalesce *ec)
3398 {
3399         bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
3400         struct scatterlist sgs_rx;
3401         int i;
3402
3403         if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3404                 return -EOPNOTSUPP;
3405
3406         if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs ||
3407                                ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
3408                 return -EINVAL;
3409
3410         if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
3411                 vi->rx_dim_enabled = true;
3412                 for (i = 0; i < vi->max_queue_pairs; i++)
3413                         vi->rq[i].dim_enabled = true;
3414                 return 0;
3415         }
3416
3417         if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
3418                 vi->rx_dim_enabled = false;
3419                 for (i = 0; i < vi->max_queue_pairs; i++)
3420                         vi->rq[i].dim_enabled = false;
3421         }
3422
3423         /* Since the per-queue coalescing params can be set individually,
3424          * we need to apply the new global params even if they
3425          * are unchanged.
3426          */
3427         vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
3428         vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
3429         sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
3430
3431         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3432                                   VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
3433                                   &sgs_rx))
3434                 return -EINVAL;
3435
3436         vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
3437         vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
3438         for (i = 0; i < vi->max_queue_pairs; i++) {
3439                 vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
3440                 vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
3441         }
3442
3443         return 0;
3444 }
3445
3446 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
3447                                        struct ethtool_coalesce *ec)
3448 {
3449         int err;
3450
3451         err = virtnet_send_tx_notf_coal_cmds(vi, ec);
3452         if (err)
3453                 return err;
3454
3455         err = virtnet_send_rx_notf_coal_cmds(vi, ec);
3456         if (err)
3457                 return err;
3458
3459         return 0;
3460 }
3461
3462 static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
3463                                              struct ethtool_coalesce *ec,
3464                                              u16 queue)
3465 {
3466         bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
3467         bool cur_rx_dim = vi->rq[queue].dim_enabled;
3468         u32 max_usecs, max_packets;
3469         int err;
3470
3471         max_usecs = vi->rq[queue].intr_coal.max_usecs;
3472         max_packets = vi->rq[queue].intr_coal.max_packets;
3473
3474         if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
3475                                ec->rx_max_coalesced_frames != max_packets))
3476                 return -EINVAL;
3477
3478         if (rx_ctrl_dim_on && !cur_rx_dim) {
3479                 vi->rq[queue].dim_enabled = true;
3480                 return 0;
3481         }
3482
3483         if (!rx_ctrl_dim_on && cur_rx_dim)
3484                 vi->rq[queue].dim_enabled = false;
3485
3486         /* If no params are updated, userspace ethtool will
3487          * reject the modification.
3488          */
3489         err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
3490                                                ec->rx_coalesce_usecs,
3491                                                ec->rx_max_coalesced_frames);
3492         if (err)
3493                 return err;
3494
3495         return 0;
3496 }
3497
3498 static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
3499                                           struct ethtool_coalesce *ec,
3500                                           u16 queue)
3501 {
3502         int err;
3503
3504         err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue);
3505         if (err)
3506                 return err;
3507
3508         err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue,
3509                                                ec->tx_coalesce_usecs,
3510                                                ec->tx_max_coalesced_frames);
3511         if (err)
3512                 return err;
3513
3514         return 0;
3515 }
3516
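     /* Deferred DIM worker: apply the moderation profile chosen by net_dim()
      * to every receive queue that has adaptive coalescing enabled.
      */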
3517 static void virtnet_rx_dim_work(struct work_struct *work)
3518 {
3519         struct dim *dim = container_of(work, struct dim, work);
3520         struct receive_queue *rq = container_of(dim,
3521                         struct receive_queue, dim);
3522         struct virtnet_info *vi = rq->vq->vdev->priv;
3523         struct net_device *dev = vi->dev;
3524         struct dim_cq_moder update_moder;
3525         int i, qnum, err;
3526
3527         if (!rtnl_trylock())
3528                 return;
3529
3530         /* Each rxq's work is queued by "net_dim()->schedule_work()"
3531          * in response to NAPI traffic changes. Note that dim->profile_ix
3532          * for each rxq is updated prior to the queuing action.
3533          * So we only need to traverse and update profiles for all rxqs
3534          * in this work item, which holds the rtnl_lock.
3535          */
3536         for (i = 0; i < vi->curr_queue_pairs; i++) {
3537                 rq = &vi->rq[i];
3538                 dim = &rq->dim;
3539                 qnum = rq - vi->rq;
3540
3541                 if (!rq->dim_enabled)
3542                         continue;
3543
3544                 update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
3545                 if (update_moder.usec != rq->intr_coal.max_usecs ||
3546                     update_moder.pkts != rq->intr_coal.max_packets) {
3547                         err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
3548                                                                update_moder.usec,
3549                                                                update_moder.pkts);
3550                         if (err)
3551                                 pr_debug("%s: Failed to send dim parameters on rxq%d\n",
3552                                          dev->name, qnum);
3553                         dim->state = DIM_START_MEASURE;
3554                 }
3555         }
3556
3557         rtnl_unlock();
3558 }
3559
3560 static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
3561 {
3562         /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
3563          * or VIRTIO_NET_F_VQ_NOTF_COAL feature is negotiated.
3564          */
3565         if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
3566                 return -EOPNOTSUPP;
3567
3568         if (ec->tx_max_coalesced_frames > 1 ||
3569             ec->rx_max_coalesced_frames != 1)
3570                 return -EINVAL;
3571
3572         return 0;
3573 }
3574
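     /* The NAPI weight (tx NAPI on/off) may only change while the device is
      * down; report whether an update is needed, or -EBUSY if the device is up.
      */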
3575 static int virtnet_should_update_vq_weight(int dev_flags, int weight,
3576                                            int vq_weight, bool *should_update)
3577 {
3578         if (weight != vq_weight) {
3579                 if (dev_flags & IFF_UP)
3580                         return -EBUSY;
3581                 *should_update = true;
3582         }
3583
3584         return 0;
3585 }
3586
3587 static int virtnet_set_coalesce(struct net_device *dev,
3588                                 struct ethtool_coalesce *ec,
3589                                 struct kernel_ethtool_coalesce *kernel_coal,
3590                                 struct netlink_ext_ack *extack)
3591 {
3592         struct virtnet_info *vi = netdev_priv(dev);
3593         int ret, queue_number, napi_weight;
3594         bool update_napi = false;
3595
3596         /* Can't change NAPI weight while the device is up */
3597         napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
3598         for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) {
3599                 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
3600                                                       vi->sq[queue_number].napi.weight,
3601                                                       &update_napi);
3602                 if (ret)
3603                         return ret;
3604
3605                 if (update_napi) {
3606                         /* All queues in [queue_number, vi->max_queue_pairs) will be
3607                          * updated for the sake of simplicity, even if only some need it
3608                          */
3609                         break;
3610                 }
3611         }
3612
3613         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
3614                 ret = virtnet_send_notf_coal_cmds(vi, ec);
3615         else
3616                 ret = virtnet_coal_params_supported(ec);
3617
3618         if (ret)
3619                 return ret;
3620
3621         if (update_napi) {
3622                 for (; queue_number < vi->max_queue_pairs; queue_number++)
3623                         vi->sq[queue_number].napi.weight = napi_weight;
3624         }
3625
3626         return ret;
3627 }
3628
3629 static int virtnet_get_coalesce(struct net_device *dev,
3630                                 struct ethtool_coalesce *ec,
3631                                 struct kernel_ethtool_coalesce *kernel_coal,
3632                                 struct netlink_ext_ack *extack)
3633 {
3634         struct virtnet_info *vi = netdev_priv(dev);
3635
3636         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
3637                 ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs;
3638                 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
3639                 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
3640                 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
3641                 ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled;
3642         } else {
3643                 ec->rx_max_coalesced_frames = 1;
3644
3645                 if (vi->sq[0].napi.weight)
3646                         ec->tx_max_coalesced_frames = 1;
3647         }
3648
3649         return 0;
3650 }
3651
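     /* Per-queue coalescing (e.g., hypothetically,
      * "ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 8").
      * Requires VIRTIO_NET_F_VQ_NOTF_COAL for anything beyond the defaults.
      */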
3652 static int virtnet_set_per_queue_coalesce(struct net_device *dev,
3653                                           u32 queue,
3654                                           struct ethtool_coalesce *ec)
3655 {
3656         struct virtnet_info *vi = netdev_priv(dev);
3657         int ret, napi_weight;
3658         bool update_napi = false;
3659
3660         if (queue >= vi->max_queue_pairs)
3661                 return -EINVAL;
3662
3663         /* Can't change NAPI weight while the device is up */
3664         napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
3665         ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
3666                                               vi->sq[queue].napi.weight,
3667                                               &update_napi);
3668         if (ret)
3669                 return ret;
3670
3671         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3672                 ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue);
3673         else
3674                 ret = virtnet_coal_params_supported(ec);
3675
3676         if (ret)
3677                 return ret;
3678
3679         if (update_napi)
3680                 vi->sq[queue].napi.weight = napi_weight;
3681
3682         return 0;
3683 }
3684
3685 static int virtnet_get_per_queue_coalesce(struct net_device *dev,
3686                                           u32 queue,
3687                                           struct ethtool_coalesce *ec)
3688 {
3689         struct virtnet_info *vi = netdev_priv(dev);
3690
3691         if (queue >= vi->max_queue_pairs)
3692                 return -EINVAL;
3693
3694         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
3695                 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
3696                 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
3697                 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
3698                 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
3699                 ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
3700         } else {
3701                 ec->rx_max_coalesced_frames = 1;
3702
3703                 if (vi->sq[queue].napi.weight)
3704                         ec->tx_max_coalesced_frames = 1;
3705         }
3706
3707         return 0;
3708 }
3709
3710 static void virtnet_init_settings(struct net_device *dev)
3711 {
3712         struct virtnet_info *vi = netdev_priv(dev);
3713
3714         vi->speed = SPEED_UNKNOWN;
3715         vi->duplex = DUPLEX_UNKNOWN;
3716 }
3717
3718 static void virtnet_update_settings(struct virtnet_info *vi)
3719 {
3720         u32 speed;
3721         u8 duplex;
3722
3723         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
3724                 return;
3725
3726         virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
3727
3728         if (ethtool_validate_speed(speed))
3729                 vi->speed = speed;
3730
3731         virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
3732
3733         if (ethtool_validate_duplex(duplex))
3734                 vi->duplex = duplex;
3735 }
3736
3737 static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
3738 {
3739         return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
3740 }
3741
3742 static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
3743 {
3744         return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
3745 }
3746
3747 static int virtnet_get_rxfh(struct net_device *dev,
3748                             struct ethtool_rxfh_param *rxfh)
3749 {
3750         struct virtnet_info *vi = netdev_priv(dev);
3751         int i;
3752
3753         if (rxfh->indir) {
3754                 for (i = 0; i < vi->rss_indir_table_size; ++i)
3755                         rxfh->indir[i] = vi->ctrl->rss.indirection_table[i];
3756         }
3757
3758         if (rxfh->key)
3759                 memcpy(rxfh->key, vi->ctrl->rss.key, vi->rss_key_size);
3760
3761         rxfh->hfunc = ETH_RSS_HASH_TOP;
3762
3763         return 0;
3764 }
3765
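     /* ethtool -X handler (e.g., hypothetically, "ethtool -X eth0 equal 4"):
      * accept a new indirection table and/or hash key (Toeplitz only) and
      * commit them to the device.
      */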
3766 static int virtnet_set_rxfh(struct net_device *dev,
3767                             struct ethtool_rxfh_param *rxfh,
3768                             struct netlink_ext_ack *extack)
3769 {
3770         struct virtnet_info *vi = netdev_priv(dev);
3771         int i;
3772
3773         if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
3774             rxfh->hfunc != ETH_RSS_HASH_TOP)
3775                 return -EOPNOTSUPP;
3776
3777         if (rxfh->indir) {
3778                 for (i = 0; i < vi->rss_indir_table_size; ++i)
3779                         vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
3780         }
3781         if (rxfh->key)
3782                 memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
3783
3784         virtnet_commit_rss_command(vi);
3785
3786         return 0;
3787 }
3788
3789 static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
3790 {
3791         struct virtnet_info *vi = netdev_priv(dev);
3792         int rc = 0;
3793
3794         switch (info->cmd) {
3795         case ETHTOOL_GRXRINGS:
3796                 info->data = vi->curr_queue_pairs;
3797                 break;
3798         case ETHTOOL_GRXFH:
3799                 virtnet_get_hashflow(vi, info);
3800                 break;
3801         default:
3802                 rc = -EOPNOTSUPP;
3803         }
3804
3805         return rc;
3806 }
3807
3808 static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
3809 {
3810         struct virtnet_info *vi = netdev_priv(dev);
3811         int rc = 0;
3812
3813         switch (info->cmd) {
3814         case ETHTOOL_SRXFH:
3815                 if (!virtnet_set_hashflow(vi, info))
3816                         rc = -EINVAL;
3817
3818                 break;
3819         default:
3820                 rc = -EOPNOTSUPP;
3821         }
3822
3823         return rc;
3824 }
3825
3826 static const struct ethtool_ops virtnet_ethtool_ops = {
3827         .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
3828                 ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
3829         .get_drvinfo = virtnet_get_drvinfo,
3830         .get_link = ethtool_op_get_link,
3831         .get_ringparam = virtnet_get_ringparam,
3832         .set_ringparam = virtnet_set_ringparam,
3833         .get_strings = virtnet_get_strings,
3834         .get_sset_count = virtnet_get_sset_count,
3835         .get_ethtool_stats = virtnet_get_ethtool_stats,
3836         .set_channels = virtnet_set_channels,
3837         .get_channels = virtnet_get_channels,
3838         .get_ts_info = ethtool_op_get_ts_info,
3839         .get_link_ksettings = virtnet_get_link_ksettings,
3840         .set_link_ksettings = virtnet_set_link_ksettings,
3841         .set_coalesce = virtnet_set_coalesce,
3842         .get_coalesce = virtnet_get_coalesce,
3843         .set_per_queue_coalesce = virtnet_set_per_queue_coalesce,
3844         .get_per_queue_coalesce = virtnet_get_per_queue_coalesce,
3845         .get_rxfh_key_size = virtnet_get_rxfh_key_size,
3846         .get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
3847         .get_rxfh = virtnet_get_rxfh,
3848         .set_rxfh = virtnet_set_rxfh,
3849         .get_rxnfc = virtnet_get_rxnfc,
3850         .set_rxnfc = virtnet_set_rxnfc,
3851 };
3852
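     /* Suspend path: quiesce the config worker, detach the netdev and close
      * the queues so no new buffers are posted while the device is frozen.
      */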
3853 static void virtnet_freeze_down(struct virtio_device *vdev)
3854 {
3855         struct virtnet_info *vi = vdev->priv;
3856
3857         /* Make sure no work handler is accessing the device */
3858         flush_work(&vi->config_work);
3859
3860         netif_tx_lock_bh(vi->dev);
3861         netif_device_detach(vi->dev);
3862         netif_tx_unlock_bh(vi->dev);
3863         if (netif_running(vi->dev))
3864                 virtnet_close(vi->dev);
3865 }
3866
3867 static int init_vqs(struct virtnet_info *vi);
3868
3869 static int virtnet_restore_up(struct virtio_device *vdev)
3870 {
3871         struct virtnet_info *vi = vdev->priv;
3872         int err;
3873
3874         err = init_vqs(vi);
3875         if (err)
3876                 return err;
3877
3878         virtio_device_ready(vdev);
3879
3880         enable_delayed_refill(vi);
3881
3882         if (netif_running(vi->dev)) {
3883                 err = virtnet_open(vi->dev);
3884                 if (err)
3885                         return err;
3886         }
3887
3888         netif_tx_lock_bh(vi->dev);
3889         netif_device_attach(vi->dev);
3890         netif_tx_unlock_bh(vi->dev);
3891         return err;
3892 }
3893
3894 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
3895 {
3896         struct scatterlist sg;
3897         vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
3898
3899         sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
3900
3901         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
3902                                   VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
3903                 dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
3904                 return -EINVAL;
3905         }
3906
3907         return 0;
3908 }
3909
3910 static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
3911 {
3912         u64 offloads = 0;
3913
3914         if (!vi->guest_offloads)
3915                 return 0;
3916
3917         return virtnet_set_guest_offloads(vi, offloads);
3918 }
3919
3920 static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
3921 {
3922         u64 offloads = vi->guest_offloads;
3923
3924         if (!vi->guest_offloads)
3925                 return 0;
3926
3927         return virtnet_set_guest_offloads(vi, offloads);
3928 }
3929
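     /* Attach or detach an XDP program. XDP_TX wants a dedicated send queue
      * per CPU; if the device cannot provide that many, fall back to sharing
      * tx queues under a lock. Guest offloads are cleared while a program is
      * attached and restored when it is removed.
      */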
3930 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
3931                            struct netlink_ext_ack *extack)
3932 {
3933         unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
3934                                            sizeof(struct skb_shared_info));
3935         unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
3936         struct virtnet_info *vi = netdev_priv(dev);
3937         struct bpf_prog *old_prog;
3938         u16 xdp_qp = 0, curr_qp;
3939         int i, err;
3940
3941         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
3942             && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3943                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
3944                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
3945                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
3946                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) ||
3947                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) ||
3948                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) {
3949                 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
3950                 return -EOPNOTSUPP;
3951         }
3952
3953         if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
3954                 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
3955                 return -EINVAL;
3956         }
3957
3958         if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) {
3959                 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags");
3960                 netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz);
3961                 return -EINVAL;
3962         }
3963
3964         curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
3965         if (prog)
3966                 xdp_qp = nr_cpu_ids;
3967
3968         /* XDP requires extra queues for XDP_TX */
3969         if (curr_qp + xdp_qp > vi->max_queue_pairs) {
3970                 netdev_warn_once(dev, "XDP requested %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
3971                                  curr_qp + xdp_qp, vi->max_queue_pairs);
3972                 xdp_qp = 0;
3973         }
3974
3975         old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
3976         if (!prog && !old_prog)
3977                 return 0;
3978
3979         if (prog)
3980                 bpf_prog_add(prog, vi->max_queue_pairs - 1);
3981
3982         /* Make sure NAPI is not using any XDP TX queues for RX. */
3983         if (netif_running(dev)) {
3984                 for (i = 0; i < vi->max_queue_pairs; i++) {
3985                         napi_disable(&vi->rq[i].napi);
3986                         virtnet_napi_tx_disable(&vi->sq[i].napi);
3987                 }
3988         }
3989
3990         if (!prog) {
3991                 for (i = 0; i < vi->max_queue_pairs; i++) {
3992                         rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
3993                         if (i == 0)
3994                                 virtnet_restore_guest_offloads(vi);
3995                 }
3996                 synchronize_net();
3997         }
3998
3999         err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
4000         if (err)
4001                 goto err;
4002         netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
4003         vi->xdp_queue_pairs = xdp_qp;
4004
4005         if (prog) {
4006                 vi->xdp_enabled = true;
4007                 for (i = 0; i < vi->max_queue_pairs; i++) {
4008                         rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
4009                         if (i == 0 && !old_prog)
4010                                 virtnet_clear_guest_offloads(vi);
4011                 }
4012                 if (!old_prog)
4013                         xdp_features_set_redirect_target(dev, true);
4014         } else {
4015                 xdp_features_clear_redirect_target(dev);
4016                 vi->xdp_enabled = false;
4017         }
4018
4019         for (i = 0; i < vi->max_queue_pairs; i++) {
4020                 if (old_prog)
4021                         bpf_prog_put(old_prog);
4022                 if (netif_running(dev)) {
4023                         virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
4024                         virtnet_napi_tx_enable(vi, vi->sq[i].vq,
4025                                                &vi->sq[i].napi);
4026                 }
4027         }
4028
4029         return 0;
4030
4031 err:
4032         if (!prog) {
4033                 virtnet_clear_guest_offloads(vi);
4034                 for (i = 0; i < vi->max_queue_pairs; i++)
4035                         rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
4036         }
4037
4038         if (netif_running(dev)) {
4039                 for (i = 0; i < vi->max_queue_pairs; i++) {
4040                         virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
4041                         virtnet_napi_tx_enable(vi, vi->sq[i].vq,
4042                                                &vi->sq[i].napi);
4043                 }
4044         }
4045         if (prog)
4046                 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
4047         return err;
4048 }
4049
4050 static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4051 {
4052         switch (xdp->command) {
4053         case XDP_SETUP_PROG:
4054                 return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
4055         default:
4056                 return -EINVAL;
4057         }
4058 }
4059
4060 static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
4061                                       size_t len)
4062 {
4063         struct virtnet_info *vi = netdev_priv(dev);
4064         int ret;
4065
4066         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
4067                 return -EOPNOTSUPP;
4068
4069         ret = snprintf(buf, len, "sby");
4070         if (ret >= len)
4071                 return -EOPNOTSUPP;
4072
4073         return 0;
4074 }
4075
4076 static int virtnet_set_features(struct net_device *dev,
4077                                 netdev_features_t features)
4078 {
4079         struct virtnet_info *vi = netdev_priv(dev);
4080         u64 offloads;
4081         int err;
4082
4083         if ((dev->features ^ features) & NETIF_F_GRO_HW) {
4084                 if (vi->xdp_enabled)
4085                         return -EBUSY;
4086
4087                 if (features & NETIF_F_GRO_HW)
4088                         offloads = vi->guest_offloads_capable;
4089                 else
4090                         offloads = vi->guest_offloads_capable &
4091                                    ~GUEST_OFFLOAD_GRO_HW_MASK;
4092
4093                 err = virtnet_set_guest_offloads(vi, offloads);
4094                 if (err)
4095                         return err;
4096                 vi->guest_offloads = offloads;
4097         }
4098
4099         if ((dev->features ^ features) & NETIF_F_RXHASH) {
4100                 if (features & NETIF_F_RXHASH)
4101                         vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
4102                 else
4103                         vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
4104
4105                 if (!virtnet_commit_rss_command(vi))
4106                         return -EINVAL;
4107         }
4108
4109         return 0;
4110 }
4111
4112 static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
4113 {
4114         struct virtnet_info *priv = netdev_priv(dev);
4115         struct send_queue *sq = &priv->sq[txqueue];
4116         struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
4117
4118         u64_stats_update_begin(&sq->stats.syncp);
4119         u64_stats_inc(&sq->stats.tx_timeouts);
4120         u64_stats_update_end(&sq->stats.syncp);
4121
4122         netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
4123                    txqueue, sq->name, sq->vq->index, sq->vq->name,
4124                    jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
4125 }
4126
4127 static const struct net_device_ops virtnet_netdev = {
4128         .ndo_open            = virtnet_open,
4129         .ndo_stop            = virtnet_close,
4130         .ndo_start_xmit      = start_xmit,
4131         .ndo_validate_addr   = eth_validate_addr,
4132         .ndo_set_mac_address = virtnet_set_mac_address,
4133         .ndo_set_rx_mode     = virtnet_set_rx_mode,
4134         .ndo_get_stats64     = virtnet_stats,
4135         .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
4136         .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
4137         .ndo_bpf                = virtnet_xdp,
4138         .ndo_xdp_xmit           = virtnet_xdp_xmit,
4139         .ndo_features_check     = passthru_features_check,
4140         .ndo_get_phys_port_name = virtnet_get_phys_port_name,
4141         .ndo_set_features       = virtnet_set_features,
4142         .ndo_tx_timeout         = virtnet_tx_timeout,
4143 };
4144
4145 static void virtnet_config_changed_work(struct work_struct *work)
4146 {
4147         struct virtnet_info *vi =
4148                 container_of(work, struct virtnet_info, config_work);
4149         u16 v;
4150
4151         if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
4152                                  struct virtio_net_config, status, &v) < 0)
4153                 return;
4154
4155         if (v & VIRTIO_NET_S_ANNOUNCE) {
4156                 netdev_notify_peers(vi->dev);
4157                 virtnet_ack_link_announce(vi);
4158         }
4159
4160         /* Ignore unknown (future) status bits */
4161         v &= VIRTIO_NET_S_LINK_UP;
4162
4163         if (vi->status == v)
4164                 return;
4165
4166         vi->status = v;
4167
4168         if (vi->status & VIRTIO_NET_S_LINK_UP) {
4169                 virtnet_update_settings(vi);
4170                 netif_carrier_on(vi->dev);
4171                 netif_tx_wake_all_queues(vi->dev);
4172         } else {
4173                 netif_carrier_off(vi->dev);
4174                 netif_tx_stop_all_queues(vi->dev);
4175         }
4176 }
4177
4178 static void virtnet_config_changed(struct virtio_device *vdev)
4179 {
4180         struct virtnet_info *vi = vdev->priv;
4181
4182         schedule_work(&vi->config_work);
4183 }
4184
4185 static void virtnet_free_queues(struct virtnet_info *vi)
4186 {
4187         int i;
4188
4189         for (i = 0; i < vi->max_queue_pairs; i++) {
4190                 __netif_napi_del(&vi->rq[i].napi);
4191                 __netif_napi_del(&vi->sq[i].napi);
4192         }
4193
4194         /* We called __netif_napi_del(), so we must respect
4195          * an RCU grace period before freeing vi->rq.
4196          */
4197         synchronize_net();
4198
4199         kfree(vi->rq);
4200         kfree(vi->sq);
4201         kfree(vi->ctrl);
4202 }
4203
4204 static void _free_receive_bufs(struct virtnet_info *vi)
4205 {
4206         struct bpf_prog *old_prog;
4207         int i;
4208
4209         for (i = 0; i < vi->max_queue_pairs; i++) {
4210                 while (vi->rq[i].pages)
4211                         __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
4212
4213                 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
4214                 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
4215                 if (old_prog)
4216                         bpf_prog_put(old_prog);
4217         }
4218 }
4219
4220 static void free_receive_bufs(struct virtnet_info *vi)
4221 {
4222         rtnl_lock();
4223         _free_receive_bufs(vi);
4224         rtnl_unlock();
4225 }
4226
4227 static void free_receive_page_frags(struct virtnet_info *vi)
4228 {
4229         int i;
4230         for (i = 0; i < vi->max_queue_pairs; i++)
4231                 if (vi->rq[i].alloc_frag.page) {
4232                         if (vi->rq[i].do_dma && vi->rq[i].last_dma)
4233                                 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
4234                         put_page(vi->rq[i].alloc_frag.page);
4235                 }
4236 }
4237
4238 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
4239 {
4240         if (!is_xdp_frame(buf))
4241                 dev_kfree_skb(buf);
4242         else
4243                 xdp_return_frame(ptr_to_xdp(buf));
4244 }
4245
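     /* Reclaim buffers the device never consumed: skbs or XDP frames from the
      * send queues, then receive buffers (unmapping any DMA) from the rx side.
      */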
4246 static void free_unused_bufs(struct virtnet_info *vi)
4247 {
4248         void *buf;
4249         int i;
4250
4251         for (i = 0; i < vi->max_queue_pairs; i++) {
4252                 struct virtqueue *vq = vi->sq[i].vq;
4253                 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
4254                         virtnet_sq_free_unused_buf(vq, buf);
4255                 cond_resched();
4256         }
4257
4258         for (i = 0; i < vi->max_queue_pairs; i++) {
4259                 struct virtqueue *vq = vi->rq[i].vq;
4260
4261                 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
4262                         virtnet_rq_unmap_free_buf(vq, buf);
4263                 cond_resched();
4264         }
4265 }
4266
4267 static void virtnet_del_vqs(struct virtnet_info *vi)
4268 {
4269         struct virtio_device *vdev = vi->vdev;
4270
4271         virtnet_clean_affinity(vi);
4272
4273         vdev->config->del_vqs(vdev);
4274
4275         virtnet_free_queues(vi);
4276 }
4277
4278 /* How large should a single buffer be so a queue full of these can fit at
4279  * least one full packet?
4280  * Logic below assumes the mergeable buffer header is used.
4281  */
4282 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
4283 {
4284         const unsigned int hdr_len = vi->hdr_len;
4285         unsigned int rq_size = virtqueue_get_vring_size(vq);
4286         unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
4287         unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
4288         unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
4289
4290         return max(max(min_buf_len, hdr_len) - hdr_len,
4291                    (unsigned int)GOOD_PACKET_LEN);
4292 }
4293
4294 static int virtnet_find_vqs(struct virtnet_info *vi)
4295 {
4296         vq_callback_t **callbacks;
4297         struct virtqueue **vqs;
4298         int ret = -ENOMEM;
4299         int i, total_vqs;
4300         const char **names;
4301         bool *ctx;
4302
4303         /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed
4304          * by up to N-1 further RX/TX queue pairs used in multiqueue mode,
4305          * followed by a possible control vq.
4306          */
4307         total_vqs = vi->max_queue_pairs * 2 +
4308                     virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
4309
4310         /* Allocate space for find_vqs parameters */
4311         vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
4312         if (!vqs)
4313                 goto err_vq;
4314         callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
4315         if (!callbacks)
4316                 goto err_callback;
4317         names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
4318         if (!names)
4319                 goto err_names;
4320         if (!vi->big_packets || vi->mergeable_rx_bufs) {
4321                 ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
4322                 if (!ctx)
4323                         goto err_ctx;
4324         } else {
4325                 ctx = NULL;
4326         }
4327
4328         /* Parameters for control virtqueue, if any */
4329         if (vi->has_cvq) {
4330                 callbacks[total_vqs - 1] = NULL;
4331                 names[total_vqs - 1] = "control";
4332         }
4333
4334         /* Allocate/initialize parameters for send/receive virtqueues */
4335         for (i = 0; i < vi->max_queue_pairs; i++) {
4336                 callbacks[rxq2vq(i)] = skb_recv_done;
4337                 callbacks[txq2vq(i)] = skb_xmit_done;
4338                 sprintf(vi->rq[i].name, "input.%d", i);
4339                 sprintf(vi->sq[i].name, "output.%d", i);
4340                 names[rxq2vq(i)] = vi->rq[i].name;
4341                 names[txq2vq(i)] = vi->sq[i].name;
4342                 if (ctx)
4343                         ctx[rxq2vq(i)] = true;
4344         }
4345
4346         ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
4347                                   names, ctx, NULL);
4348         if (ret)
4349                 goto err_find;
4350
4351         if (vi->has_cvq) {
4352                 vi->cvq = vqs[total_vqs - 1];
4353                 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
4354                         vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4355         }
4356
4357         for (i = 0; i < vi->max_queue_pairs; i++) {
4358                 vi->rq[i].vq = vqs[rxq2vq(i)];
4359                 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
4360                 vi->sq[i].vq = vqs[txq2vq(i)];
4361         }
4362
4363         /* Success: ret == 0.  Fall through to free the temporary
4364          * find_vqs() parameter arrays, which are no longer needed.
4365          */
4366 err_find:
4367         kfree(ctx);
4368 err_ctx:
4369         kfree(names);
4370 err_names:
4371         kfree(callbacks);
4372 err_callback:
4373         kfree(vqs);
4374 err_vq:
4375         return ret;
4376 }
4377
4378 static int virtnet_alloc_queues(struct virtnet_info *vi)
4379 {
4380         int i;
4381
4382         if (vi->has_cvq) {
4383                 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
4384                 if (!vi->ctrl)
4385                         goto err_ctrl;
4386         } else {
4387                 vi->ctrl = NULL;
4388         }
4389         vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
4390         if (!vi->sq)
4391                 goto err_sq;
4392         vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
4393         if (!vi->rq)
4394                 goto err_rq;
4395
4396         INIT_DELAYED_WORK(&vi->refill, refill_work);
4397         for (i = 0; i < vi->max_queue_pairs; i++) {
4398                 vi->rq[i].pages = NULL;
4399                 netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
4400                                       napi_weight);
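                /* A TX NAPI weight of 0 disables TX NAPI polling entirely;
                 * the napi_tx module parameter (declared at the top of the
                 * file) selects between the two modes below.
                 */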
4401                 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
4402                                          virtnet_poll_tx,
4403                                          napi_tx ? napi_weight : 0);
4404
4405                 INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work);
4406                 vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4407
4408                 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
4409                 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
4410                 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
4411
4412                 u64_stats_init(&vi->rq[i].stats.syncp);
4413                 u64_stats_init(&vi->sq[i].stats.syncp);
4414         }
4415
4416         return 0;
4417
4418 err_rq:
4419         kfree(vi->sq);
4420 err_sq:
4421         kfree(vi->ctrl);
4422 err_ctrl:
4423         return -ENOMEM;
4424 }
4425
4426 static int init_vqs(struct virtnet_info *vi)
4427 {
4428         int ret;
4429
4430         /* Allocate send & receive queues */
4431         ret = virtnet_alloc_queues(vi);
4432         if (ret)
4433                 goto err;
4434
4435         ret = virtnet_find_vqs(vi);
4436         if (ret)
4437                 goto err_free;
4438
4439         virtnet_rq_set_premapped(vi);
4440
4441         cpus_read_lock();
4442         virtnet_set_affinity(vi);
4443         cpus_read_unlock();
4444
4445         return 0;
4446
4447 err_free:
4448         virtnet_free_queues(vi);
4449 err:
4450         return ret;
4451 }
4452
4453 #ifdef CONFIG_SYSFS
4454 static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
4455                 char *buf)
4456 {
4457         struct virtnet_info *vi = netdev_priv(queue->dev);
4458         unsigned int queue_index = get_netdev_rx_queue_index(queue);
4459         unsigned int headroom = virtnet_get_headroom(vi);
4460         unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
4461         struct ewma_pkt_len *avg;
4462
4463         BUG_ON(queue_index >= vi->max_queue_pairs);
4464         avg = &vi->rq[queue_index].mrg_avg_pkt_len;
4465         return sprintf(buf, "%u\n",
4466                        get_mergeable_buf_len(&vi->rq[queue_index], avg,
4467                                        SKB_DATA_ALIGN(headroom + tailroom)));
4468 }
4469
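/* Usage note (hypothetical device and queue names): once registered, the
 * attribute defined below is readable from user space, e.g.:
 *
 *	cat /sys/class/net/eth0/queues/rx-0/virtio_net/mergeable_rx_buffer_size
 *
 * and reports the EWMA-derived buffer size for that receive queue.
 */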
4470 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
4471         __ATTR_RO(mergeable_rx_buffer_size);
4472
4473 static struct attribute *virtio_net_mrg_rx_attrs[] = {
4474         &mergeable_rx_buffer_size_attribute.attr,
4475         NULL
4476 };
4477
4478 static const struct attribute_group virtio_net_mrg_rx_group = {
4479         .name = "virtio_net",
4480         .attrs = virtio_net_mrg_rx_attrs
4481 };
4482 #endif
4483
4484 static bool virtnet_fail_on_feature(struct virtio_device *vdev,
4485                                     unsigned int fbit,
4486                                     const char *fname, const char *dname)
4487 {
4488         if (!virtio_has_feature(vdev, fbit))
4489                 return false;
4490
4491         dev_err(&vdev->dev, "device advertises feature %s but not %s",
4492                 fname, dname);
4493
4494         return true;
4495 }
4496
4497 #define VIRTNET_FAIL_ON(vdev, fbit, dbit)                       \
4498         virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
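/* The #fbit stringification lets virtnet_fail_on_feature() print the
 * feature macro's name verbatim in its error message.
 */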
4499
4500 static bool virtnet_validate_features(struct virtio_device *vdev)
4501 {
4502         if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
4503             (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
4504                              "VIRTIO_NET_F_CTRL_VQ") ||
4505              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
4506                              "VIRTIO_NET_F_CTRL_VQ") ||
4507              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
4508                              "VIRTIO_NET_F_CTRL_VQ") ||
4509              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
4510              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
4511                              "VIRTIO_NET_F_CTRL_VQ") ||
4512              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
4513                              "VIRTIO_NET_F_CTRL_VQ") ||
4514              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
4515                              "VIRTIO_NET_F_CTRL_VQ") ||
4516              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
4517                              "VIRTIO_NET_F_CTRL_VQ") ||
4518              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL,
4519                              "VIRTIO_NET_F_CTRL_VQ"))) {
4520                 return false;
4521         }
4522
4523         return true;
4524 }
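/* All of the feature bits checked above are driven by commands on the
 * control virtqueue, hence the hard dependency on VIRTIO_NET_F_CTRL_VQ.
 */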
4525
4526 #define MIN_MTU ETH_MIN_MTU
4527 #define MAX_MTU ETH_MAX_MTU
4528
4529 static int virtnet_validate(struct virtio_device *vdev)
4530 {
4531         if (!vdev->config->get) {
4532                 dev_err(&vdev->dev, "%s failure: config access disabled\n",
4533                         __func__);
4534                 return -EINVAL;
4535         }
4536
4537         if (!virtnet_validate_features(vdev))
4538                 return -EINVAL;
4539
4540         if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
4541                 int mtu = virtio_cread16(vdev,
4542                                          offsetof(struct virtio_net_config,
4543                                                   mtu));
4544                 if (mtu < MIN_MTU)
4545                         __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
4546         }
4547
4548         if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
4549             !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
4550                 dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
4551                 __virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
4552         }
4553
4554         return 0;
4555 }
4556
4557 static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
4558 {
4559         return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
4560                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
4561                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
4562                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
4563                 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) &&
4564                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6));
4565 }
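/* Note the USO case above: both VIRTIO_NET_F_GUEST_USO4 and
 * VIRTIO_NET_F_GUEST_USO6 must be offered before USO alone counts as
 * guest GSO support.
 */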
4566
4567 static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
4568 {
4569         bool guest_gso = virtnet_check_guest_gso(vi);
4570
4571         /* If the device can receive ANY guest GSO packets, regardless of
4572          * mtu, allocate buffers of the maximum size; otherwise limit
4573          * them to what the mtu requires.
4574          */
4575         if (mtu > ETH_DATA_LEN || guest_gso) {
4576                 vi->big_packets = true;
4577                 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
4578         }
4579 }
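/* Worked example (illustrative): with guest GSO not negotiated and
 * mtu == 9000 on a system with 4096-byte pages,
 * big_packets_num_skbfrags == DIV_ROUND_UP(9000, 4096) == 3.
 */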
4580
4581 static int virtnet_probe(struct virtio_device *vdev)
4582 {
4583         int i, err = -ENOMEM;
4584         struct net_device *dev;
4585         struct virtnet_info *vi;
4586         u16 max_queue_pairs;
4587         int mtu = 0;
4588
4589         /* Find out whether the host supports a multiqueue/RSS virtio_net device */
4590         max_queue_pairs = 1;
4591         if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
4592                 max_queue_pairs =
4593                      virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
4594
4595         /* We need a valid queue-pair count and a control vq to use multiqueue */
4596         if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
4597             max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
4598             !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4599                 max_queue_pairs = 1;
4600
4601         /* Allocate ourselves a network device with room for our info */
4602         dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
4603         if (!dev)
4604                 return -ENOMEM;
4605
4606         /* Set up network device as normal. */
4607         dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
4608                            IFF_TX_SKB_NO_LINEAR;
4609         dev->netdev_ops = &virtnet_netdev;
4610         dev->features = NETIF_F_HIGHDMA;
4611
4612         dev->ethtool_ops = &virtnet_ethtool_ops;
4613         SET_NETDEV_DEV(dev, &vdev->dev);
4614
4615         /* Do we support "hardware" checksums? */
4616         if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
4617                 /* This opens up the world of extra features. */
4618                 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
4619                 if (csum)
4620                         dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
4621
4622                 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
4623                         dev->hw_features |= NETIF_F_TSO
4624                                 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
4625                 }
4626                 /* Individual feature bits: what can the host handle? */
4627                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
4628                         dev->hw_features |= NETIF_F_TSO;
4629                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
4630                         dev->hw_features |= NETIF_F_TSO6;
4631                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
4632                         dev->hw_features |= NETIF_F_TSO_ECN;
4633                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
4634                         dev->hw_features |= NETIF_F_GSO_UDP_L4;
4635
4636                 dev->features |= NETIF_F_GSO_ROBUST;
4637
4638                 if (gso)
4639                         dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
4640                 /* (!csum && gso) case will be fixed by register_netdev() */
4641         }
4642         if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
4643                 dev->features |= NETIF_F_RXCSUM;
4644         if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
4645             virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
4646                 dev->features |= NETIF_F_GRO_HW;
4647         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
4648                 dev->hw_features |= NETIF_F_GRO_HW;
4649
4650         dev->vlan_features = dev->features;
4651         dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
4652
4653         /* MTU range: 68 - 65535 */
4654         dev->min_mtu = MIN_MTU;
4655         dev->max_mtu = MAX_MTU;
4656
4657         /* Configuration may specify what MAC to use.  Otherwise random. */
4658         if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
4659                 u8 addr[ETH_ALEN];
4660
4661                 virtio_cread_bytes(vdev,
4662                                    offsetof(struct virtio_net_config, mac),
4663                                    addr, ETH_ALEN);
4664                 eth_hw_addr_set(dev, addr);
4665         } else {
4666                 eth_hw_addr_random(dev);
4667                 dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
4668                          dev->dev_addr);
4669         }
4670
4671         /* Set up our device-specific information */
4672         vi = netdev_priv(dev);
4673         vi->dev = dev;
4674         vi->vdev = vdev;
4675         vdev->priv = vi;
4676
4677         INIT_WORK(&vi->config_work, virtnet_config_changed_work);
4678         spin_lock_init(&vi->refill_lock);
4679
4680         if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
4681                 vi->mergeable_rx_bufs = true;
4682                 dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
4683         }
4684
4685         if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
4686                 vi->has_rss_hash_report = true;
4687
4688         if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
4689                 vi->has_rss = true;
4690
4691         if (vi->has_rss || vi->has_rss_hash_report) {
4692                 vi->rss_indir_table_size =
4693                         virtio_cread16(vdev, offsetof(struct virtio_net_config,
4694                                 rss_max_indirection_table_length));
4695                 vi->rss_key_size =
4696                         virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
4697
4698                 vi->rss_hash_types_supported =
4699                     virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
4700                 vi->rss_hash_types_supported &=
4701                                 ~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
4702                                   VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
4703                                   VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
4704
4705                 dev->hw_features |= NETIF_F_RXHASH;
4706         }
4707
4708         if (vi->has_rss_hash_report)
4709                 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
4710         else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
4711                  virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4712                 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
4713         else
4714                 vi->hdr_len = sizeof(struct virtio_net_hdr);
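        /* For reference (illustrative sizes, assuming the usual ABI
         * layouts): struct virtio_net_hdr is 10 bytes,
         * virtio_net_hdr_mrg_rxbuf is 12, and virtio_net_hdr_v1_hash is 20.
         */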
4715
4716         if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
4717             virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4718                 vi->any_header_sg = true;
4719
4720         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4721                 vi->has_cvq = true;
4722
4723         if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
4724                 mtu = virtio_cread16(vdev,
4725                                      offsetof(struct virtio_net_config,
4726                                               mtu));
4727                 if (mtu < dev->min_mtu) {
4728                         /* Should never trigger: MTU was previously validated
4729                          * in virtnet_validate.
4730                          */
4731                         dev_err(&vdev->dev,
4732                                 "device MTU appears to have changed it is now %d < %d",
4733                                 mtu, dev->min_mtu);
4734                         err = -EINVAL;
4735                         goto free;
4736                 }
4737
4738                 dev->mtu = mtu;
4739                 dev->max_mtu = mtu;
4740         }
4741
4742         virtnet_set_big_packets(vi, mtu);
4743
4744         if (vi->any_header_sg)
4745                 dev->needed_headroom = vi->hdr_len;
4746
4747         /* Enable multiqueue by default */
4748         if (num_online_cpus() >= max_queue_pairs)
4749                 vi->curr_queue_pairs = max_queue_pairs;
4750         else
4751                 vi->curr_queue_pairs = num_online_cpus();
4752         vi->max_queue_pairs = max_queue_pairs;
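        /* E.g. (illustrative): 16 online CPUs with max_queue_pairs == 8
         * yields curr_queue_pairs == 8; with only 4 CPUs online it yields
         * 4, so there is never more than one active pair per CPU.
         */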
4753
4754         /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
4755         err = init_vqs(vi);
4756         if (err)
4757                 goto free;
4758
4759         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
4760                 vi->intr_coal_rx.max_usecs = 0;
4761                 vi->intr_coal_tx.max_usecs = 0;
4762                 vi->intr_coal_rx.max_packets = 0;
4763
4764                 /* Keep the default values of the coalescing parameters
4765                  * aligned with the default napi_tx state.
4766                  */
4767                 if (vi->sq[0].napi.weight)
4768                         vi->intr_coal_tx.max_packets = 1;
4769                 else
4770                         vi->intr_coal_tx.max_packets = 0;
4771         }
4772
4773         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
4774                 /* Same reasoning as for VIRTIO_NET_F_NOTF_COAL above. */
4775                 for (i = 0; i < vi->max_queue_pairs; i++)
4776                         if (vi->sq[i].napi.weight)
4777                                 vi->sq[i].intr_coal.max_packets = 1;
4778         }
4779
4780 #ifdef CONFIG_SYSFS
4781         if (vi->mergeable_rx_bufs)
4782                 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
4783 #endif
4784         netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
4785         netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
4786
4787         virtnet_init_settings(dev);
4788
4789         if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
4790                 vi->failover = net_failover_create(vi->dev);
4791                 if (IS_ERR(vi->failover)) {
4792                         err = PTR_ERR(vi->failover);
4793                         goto free_vqs;
4794                 }
4795         }
4796
4797         if (vi->has_rss || vi->has_rss_hash_report)
4798                 virtnet_init_default_rss(vi);
4799
4800         /* serialize netdev register + virtio_device_ready() with ndo_open() */
4801         rtnl_lock();
4802
4803         err = register_netdevice(dev);
4804         if (err) {
4805                 pr_debug("virtio_net: registering device failed\n");
4806                 rtnl_unlock();
4807                 goto free_failover;
4808         }
4809
4810         virtio_device_ready(vdev);
4811
4812         _virtnet_set_queues(vi, vi->curr_queue_pairs);
4813
4814         /* A random MAC address has been assigned; notify the device.
4815          * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is missing,
4816          * because many devices work fine without the MAC set explicitly.
4817          */
4818         if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
4819             virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
4820                 struct scatterlist sg;
4821
4822                 sg_init_one(&sg, dev->dev_addr, dev->addr_len);
4823                 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
4824                                           VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
4825                         pr_debug("virtio_net: setting MAC address failed\n");
4826                         rtnl_unlock();
4827                         err = -EINVAL;
4828                         goto free_unregister_netdev;
4829                 }
4830         }
4831
4832         rtnl_unlock();
4833
4834         err = virtnet_cpu_notif_add(vi);
4835         if (err) {
4836                 pr_debug("virtio_net: registering cpu notifier failed\n");
4837                 goto free_unregister_netdev;
4838         }
4839
4840         /* Assume link up if device can't report link status;
4841          * otherwise get link status from config. */
4842         netif_carrier_off(dev);
4843         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
4844                 schedule_work(&vi->config_work);
4845         } else {
4846                 vi->status = VIRTIO_NET_S_LINK_UP;
4847                 virtnet_update_settings(vi);
4848                 netif_carrier_on(dev);
4849         }
4850
4851         for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
4852                 if (virtio_has_feature(vi->vdev, guest_offloads[i]))
4853                         set_bit(guest_offloads[i], &vi->guest_offloads);
4854         vi->guest_offloads_capable = vi->guest_offloads;
4855
4856         pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
4857                  dev->name, max_queue_pairs);
4858
4859         return 0;
4860
4861 free_unregister_netdev:
4862         unregister_netdev(dev);
4863 free_failover:
4864         net_failover_destroy(vi->failover);
4865 free_vqs:
4866         virtio_reset_device(vdev);
4867         cancel_delayed_work_sync(&vi->refill);
4868         free_receive_page_frags(vi);
4869         virtnet_del_vqs(vi);
4870 free:
4871         free_netdev(dev);
4872         return err;
4873 }
4874
4875 static void remove_vq_common(struct virtnet_info *vi)
4876 {
4877         virtio_reset_device(vi->vdev);
4878
4879         /* Free unused buffers in both send and recv, if any. */
4880         free_unused_bufs(vi);
4881
4882         free_receive_bufs(vi);
4883
4884         free_receive_page_frags(vi);
4885
4886         virtnet_del_vqs(vi);
4887 }
4888
4889 static void virtnet_remove(struct virtio_device *vdev)
4890 {
4891         struct virtnet_info *vi = vdev->priv;
4892
4893         virtnet_cpu_notif_remove(vi);
4894
4895         /* Make sure no work handler is accessing the device. */
4896         flush_work(&vi->config_work);
4897
4898         unregister_netdev(vi->dev);
4899
4900         net_failover_destroy(vi->failover);
4901
4902         remove_vq_common(vi);
4903
4904         free_netdev(vi->dev);
4905 }
4906
4907 static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
4908 {
4909         struct virtnet_info *vi = vdev->priv;
4910
4911         virtnet_cpu_notif_remove(vi);
4912         virtnet_freeze_down(vdev);
4913         remove_vq_common(vi);
4914
4915         return 0;
4916 }
4917
4918 static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
4919 {
4920         struct virtnet_info *vi = vdev->priv;
4921         int err;
4922
4923         err = virtnet_restore_up(vdev);
4924         if (err)
4925                 return err;
4926         virtnet_set_queues(vi, vi->curr_queue_pairs);
4927
4928         err = virtnet_cpu_notif_add(vi);
4929         if (err) {
4930                 virtnet_freeze_down(vdev);
4931                 remove_vq_common(vi);
4932                 return err;
4933         }
4934
4935         return 0;
4936 }
4937
4938 static struct virtio_device_id id_table[] = {
4939         { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
4940         { 0 },
4941 };
4942
4943 #define VIRTNET_FEATURES \
4944         VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
4945         VIRTIO_NET_F_MAC, \
4946         VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
4947         VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
4948         VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
4949         VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
4950         VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
4951         VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
4952         VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
4953         VIRTIO_NET_F_CTRL_MAC_ADDR, \
4954         VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
4955         VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
4956         VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
4957         VIRTIO_NET_F_VQ_NOTF_COAL, \
4958         VIRTIO_NET_F_GUEST_HDRLEN
4959
4960 static unsigned int features[] = {
4961         VIRTNET_FEATURES,
4962 };
4963
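/* Legacy (pre-virtio-1.0) devices may additionally negotiate the
 * transitional VIRTIO_NET_F_GSO and VIRTIO_F_ANY_LAYOUT bits, so they
 * get their own feature table:
 */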
4964 static unsigned int features_legacy[] = {
4965         VIRTNET_FEATURES,
4966         VIRTIO_NET_F_GSO,
4967         VIRTIO_F_ANY_LAYOUT,
4968 };
4969
4970 static struct virtio_driver virtio_net_driver = {
4971         .feature_table = features,
4972         .feature_table_size = ARRAY_SIZE(features),
4973         .feature_table_legacy = features_legacy,
4974         .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
4975         .driver.name =  KBUILD_MODNAME,
4976         .driver.owner = THIS_MODULE,
4977         .id_table =     id_table,
4978         .validate =     virtnet_validate,
4979         .probe =        virtnet_probe,
4980         .remove =       virtnet_remove,
4981         .config_changed = virtnet_config_changed,
4982 #ifdef CONFIG_PM_SLEEP
4983         .freeze =       virtnet_freeze,
4984         .restore =      virtnet_restore,
4985 #endif
4986 };
4987
4988 static __init int virtio_net_driver_init(void)
4989 {
4990         int ret;
4991
4992         ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
4993                                       virtnet_cpu_online,
4994                                       virtnet_cpu_down_prep);
4995         if (ret < 0)
4996                 goto out;
4997         virtionet_online = ret;
4998         ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
4999                                       NULL, virtnet_cpu_dead);
5000         if (ret)
5001                 goto err_dead;
5002         ret = register_virtio_driver(&virtio_net_driver);
5003         if (ret)
5004                 goto err_virtio;
5005         return 0;
5006 err_virtio:
5007         cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
5008 err_dead:
5009         cpuhp_remove_multi_state(virtionet_online);
5010 out:
5011         return ret;
5012 }
5013 module_init(virtio_net_driver_init);
5014
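/* The exit path below tears things down in the reverse of the setup
 * order above: driver first, then the two CPU hotplug states.
 */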
5015 static __exit void virtio_net_driver_exit(void)
5016 {
5017         unregister_virtio_driver(&virtio_net_driver);
5018         cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
5019         cpuhp_remove_multi_state(virtionet_online);
5020 }
5021 module_exit(virtio_net_driver_exit);
5022
5023 MODULE_DEVICE_TABLE(virtio, id_table);
5024 MODULE_DESCRIPTION("Virtio network driver");
5025 MODULE_LICENSE("GPL");