// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/dim.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>
#include <net/netdev_rx_queue.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX		BIT(0)
#define VIRTIO_XDP_REDIR	BIT(1)

#define VIRTIO_XDP_FLAG	BIT(0)

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)
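
/* With DECLARE_EWMA(pkt_len, 0, 64), the <linux/average.h> helpers fold each
 * new sample in with weight 1/64: avg' = (63 * avg + sample) / 64. As a worked
 * example, a single 9000-byte jumbo frame arriving while the average sits at
 * 1500 bytes only moves it to roughly 1617 bytes, so a short burst of unusual
 * sizes barely perturbs the refill buffer size derived from this average.
 */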

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GUEST_USO4,
	VIRTIO_NET_F_GUEST_USO6,
	VIRTIO_NET_F_GUEST_HDRLEN
};

#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				   (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				   (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
				   (1ULL << VIRTIO_NET_F_GUEST_UFO)  | \
				   (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
				   (1ULL << VIRTIO_NET_F_GUEST_USO6))

struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

struct virtnet_sq_free_stats {
	u64 packets;
	u64 bytes;
};

struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t xdp_tx;
	u64_stats_t xdp_tx_drops;
	u64_stats_t kicks;
	u64_stats_t tx_timeouts;
};

struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t drops;
	u64_stats_t xdp_packets;
	u64_stats_t xdp_tx;
	u64_stats_t xdp_redirects;
	u64_stats_t xdp_drops;
	u64_stats_t kicks;
};

#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	{ "packets",		VIRTNET_SQ_STAT(packets) },
	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
	{ "xdp_tx",		VIRTNET_SQ_STAT(xdp_tx) },
	{ "xdp_tx_drops",	VIRTNET_SQ_STAT(xdp_tx_drops) },
	{ "kicks",		VIRTNET_SQ_STAT(kicks) },
	{ "tx_timeouts",	VIRTNET_SQ_STAT(tx_timeouts) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	{ "packets",		VIRTNET_RQ_STAT(packets) },
	{ "bytes",		VIRTNET_RQ_STAT(bytes) },
	{ "drops",		VIRTNET_RQ_STAT(drops) },
	{ "xdp_packets",	VIRTNET_RQ_STAT(xdp_packets) },
	{ "xdp_tx",		VIRTNET_RQ_STAT(xdp_tx) },
	{ "xdp_redirects",	VIRTNET_RQ_STAT(xdp_redirects) },
	{ "xdp_drops",		VIRTNET_RQ_STAT(xdp_drops) },
	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
};

#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)

struct virtnet_interrupt_coalesce {
	u32 max_packets;
	u32 max_usecs;
};

/* The DMA information of the pages allocated at a time. */
struct virtnet_rq_dma {
	dma_addr_t addr;
	u32 ref;
	u16 len;
	u16 need_sync;
};

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[16];

	struct virtnet_sq_stats stats;

	struct virtnet_interrupt_coalesce intr_coal;

	struct napi_struct napi;

	/* Record whether sq is in reset state. */
	bool reset;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	struct virtnet_rq_stats stats;

	/* The number of rx notifications */
	u16 calls;

	/* Is dynamic interrupt moderation enabled? */
	bool dim_enabled;

	/* Dynamic Interrupt Moderation */
	struct dim dim;

	u32 packets_in_napi;

	struct virtnet_interrupt_coalesce intr_coal;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[16];

	struct xdp_rxq_info xdp_rxq;

	/* Record the last dma info to free after new pages are allocated. */
	struct virtnet_rq_dma *last_dma;

	/* Do DMA by self */
	bool do_dma;
};

/* This structure can contain an RSS message with the maximum settings for the
 * indirection table and key size. Note that the default structure describing
 * an RSS configuration, virtio_net_rss_config, carries the same information
 * but cannot hold the table values. Either way, the structure is passed to the
 * virtio hw through sg_buf split into parts, because the table sizes may
 * differ according to the device configuration.
 */
#define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
#define VIRTIO_NET_RSS_MAX_TABLE_LEN    128
struct virtio_net_ctrl_rss {
	u32 hash_types;
	u16 indirection_table_mask;
	u16 unclassified_queue;
	u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
	u16 max_tx_vq;
	u8 hash_key_length;
	u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
};

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	struct virtio_net_ctrl_mq mq;
	u8 promisc;
	u8 allmulti;
	__virtio16 vid;
	__virtio64 offloads;
	struct virtio_net_ctrl_rss rss;
	struct virtio_net_ctrl_coal_tx coal_tx;
	struct virtio_net_ctrl_coal_rx coal_rx;
	struct virtio_net_ctrl_coal_vq coal_vq;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* xdp_queue_pairs may be 0 when XDP is already loaded, so track this too. */
	bool xdp_enabled;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* number of sg entries allocated for big packets */
	unsigned int big_packets_num_skbfrags;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Host supports rss and/or hash report */
	bool has_rss;
	bool has_rss_hash_report;
	u8 rss_key_size;
	u16 rss_indir_table_size;
	u32 rss_hash_types_supported;
	u32 rss_hash_types_saved;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for delayed refilling if we run low on memory. */
	struct delayed_work refill;

	/* Is delayed refill enabled? */
	bool refill_enabled;

	/* The lock to synchronize the access to refill_enabled */
	spinlock_t refill_lock;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Work struct for setting rx mode */
	struct work_struct rx_mode_work;

	/* OK to queue work setting RX mode? */
	bool rx_mode_work_enabled;

	/* Is the affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	/* Is rx dynamic interrupt moderation enabled? */
	bool rx_dim_enabled;

	/* Interrupt coalescing settings */
	struct virtnet_interrupt_coalesce intr_coal_tx;
	struct virtnet_interrupt_coalesce intr_coal_rx;

	unsigned long guest_offloads;
	unsigned long guest_offloads_capable;

	/* failover when STANDBY feature enabled */
	struct failover *failover;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_v1_hash hdr;
	/*
	 * hdr is in a separate sg buffer, and the data sg buffer shares a page
	 * with this header sg. This padding makes the next sg 16-byte aligned
	 * after the header.
	 */
	char padding[12];
};
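
/* Concretely: sizeof(struct virtio_net_hdr_v1_hash) is 20 bytes, so the
 * 12 bytes of padding round the header region up to 32 bytes, a multiple of
 * 16, which keeps the data sg that follows it in the same page 16-byte
 * aligned. page_to_skb() below uses sizeof(struct padded_vnet_hdr) as the
 * header stride only on the non-mergeable (big packets) path.
 */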

struct virtio_net_common_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf	mrg_hdr;
		struct virtio_net_hdr_v1_hash hash_v1_hdr;
	};
};

static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);

static bool is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
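
/* The sq virtqueue carries both sk_buff and xdp_frame pointers as opaque
 * tokens. Both allocations are aligned well beyond 2 bytes, so bit 0 of the
 * pointer is always clear and can be borrowed as the VIRTIO_XDP_FLAG tag:
 * e.g. an xdp_frame at 0xffff888012345670 is stored as ...5671 and masked
 * back by ptr_to_xdp() before use, while an skb pointer passes through
 * untouched.
 */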

static void __free_old_xmit(struct send_queue *sq, bool in_napi,
			    struct virtnet_sq_free_stats *stats)
{
	unsigned int len;
	void *ptr;

	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		++stats->packets;

		if (!is_xdp_frame(ptr)) {
			struct sk_buff *skb = ptr;

			pr_debug("Sent skb %p\n", skb);

			stats->bytes += skb->len;
			napi_consume_skb(skb, in_napi);
		} else {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			stats->bytes += xdp_get_frame_len(frame);
			xdp_return_frame(frame);
		}
	}
}

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}
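
/* Worked example with 2 queue pairs: vq 0 is rx0, vq 1 is tx0, vq 2 is rx1,
 * vq 3 is tx1 and vq 4 is the control queue. So vq2txq() on vq 3 yields
 * (3 - 1) / 2 = txq 1, and rxq2vq(1) = 2.
 */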

static inline struct virtio_net_common_hdr *
skb_vnet_common_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_common_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void virtnet_rq_free_buf(struct virtnet_info *vi,
				struct receive_queue *rq, void *buf)
{
	if (vi->mergeable_rx_bufs)
		put_page(virt_to_head_page(buf));
	else if (vi->big_packets)
		give_pages(rq, buf);
	else
		put_page(virt_to_head_page(buf));
}

static void enable_delayed_refill(struct virtnet_info *vi)
{
	spin_lock_bh(&vi->refill_lock);
	vi->refill_enabled = true;
	spin_unlock_bh(&vi->refill_lock);
}

static void disable_delayed_refill(struct virtnet_info *vi)
{
	spin_lock_bh(&vi->refill_lock);
	vi->refill_enabled = false;
	spin_unlock_bh(&vi->refill_lock);
}

static void enable_rx_mode_work(struct virtnet_info *vi)
{
	rtnl_lock();
	vi->rx_mode_work_enabled = true;
	rtnl_unlock();
}

static void disable_rx_mode_work(struct virtnet_info *vi)
{
	rtnl_lock();
	vi->rx_mode_work_enabled = false;
	rtnl_unlock();
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

static bool virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
		else
			return true;
	} else {
		virtqueue_disable_cb(vq);
	}

	return false;
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}

#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}
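
/* The mergeable context packs two values into one pointer-sized word: bits
 * 21..0 hold truesize and the bits above hold the headroom. For example,
 * truesize = 1536 with headroom = 256 encodes to (256 << 22) | 1536 =
 * 0x40000600, and the decoders above shift or mask the same word back apart.
 * This caps truesize at 2^22 - 1 bytes, far above any single rx buffer.
 */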

static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
					 unsigned int headroom,
					 unsigned int len)
{
	struct sk_buff *skb;

	skb = build_skb(buf, buflen);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, len);

	return skb;
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize,
				   unsigned int headroom)
{
	struct sk_buff *skb;
	struct virtio_net_common_hdr *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	struct page *page_to_free = NULL;
	int tailroom, shinfo_size;
	char *p, *hdr_p, *buf;

	p = page_address(page) + offset;
	hdr_p = p;

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = hdr_len;
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	buf = p - headroom;
	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;
	tailroom = truesize - headroom - hdr_padded_len - len;

	shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Build the skb directly around the page so large packets need no copy */
	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
		skb = virtnet_build_skb(buf, truesize, p - buf, len);
		if (unlikely(!skb))
			return NULL;

		page = (struct page *)page->private;
		if (page)
			give_pages(rq, page);
		goto ok;
	}

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	/* Copy the whole frame if it fits skb->head, otherwise
	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
	 */
	if (len <= skb_tailroom(skb))
		copy = len;
	else
		copy = ETH_HLEN;
	skb_put_data(skb, p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			page_to_free = page;
		goto ok;
	}

	/*
	 * Verify that we can indeed put this data into an skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

ok:
	hdr = skb_vnet_common_hdr(skb);
	memcpy(hdr, hdr_p, hdr_len);
	if (page_to_free)
		put_page(page_to_free);

	return skb;
}

static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
{
	struct page *page = virt_to_head_page(buf);
	struct virtnet_rq_dma *dma;
	void *head;
	int offset;

	head = page_address(page);

	dma = head;

	--dma->ref;

	if (dma->need_sync && len) {
		offset = buf - (head + sizeof(*dma));

		virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
							offset, len,
							DMA_FROM_DEVICE);
	}

	if (dma->ref)
		return;

	virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
					 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	put_page(page);
}

static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
{
	void *buf;

	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
	if (buf && rq->do_dma)
		virtnet_rq_unmap(rq, buf, *len);

	return buf;
}

static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
{
	struct virtnet_rq_dma *dma;
	dma_addr_t addr;
	u32 offset;
	void *head;

	if (!rq->do_dma) {
		sg_init_one(rq->sg, buf, len);
		return;
	}

	head = page_address(rq->alloc_frag.page);

	offset = buf - head;

	dma = head;

	addr = dma->addr - sizeof(*dma) + offset;

	sg_init_table(rq->sg, 1);
	rq->sg[0].dma_address = addr;
	rq->sg[0].length = len;
}

static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	struct virtnet_rq_dma *dma;
	void *buf, *head;
	dma_addr_t addr;

	if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
		return NULL;

	head = page_address(alloc_frag->page);

	if (rq->do_dma) {
		dma = head;

		/* new pages */
		if (!alloc_frag->offset) {
			if (rq->last_dma) {
				/* Now that the new page is allocated, the last
				 * dma will not be used. So the dma can be
				 * unmapped if the ref is 0.
				 */
				virtnet_rq_unmap(rq, rq->last_dma, 0);
				rq->last_dma = NULL;
			}

			dma->len = alloc_frag->size - sizeof(*dma);

			addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
							      dma->len, DMA_FROM_DEVICE, 0);
			if (virtqueue_dma_mapping_error(rq->vq, addr))
				return NULL;

			dma->addr = addr;
			dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);

			/* Add a reference to dma to prevent the entire dma from
			 * being released during error handling. This reference
			 * will be freed after the pages are no longer used.
			 */
			get_page(alloc_frag->page);
			dma->ref = 1;
			alloc_frag->offset = sizeof(*dma);

			rq->last_dma = dma;
		}

		++dma->ref;
	}

	buf = head + alloc_frag->offset;

	get_page(alloc_frag->page);
	alloc_frag->offset += size;

	return buf;
}
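
/* Layout of a premapped rx page frag, as built above: a struct virtnet_rq_dma
 * header sits at the start of the page and the packet buffers are carved out
 * behind it, so dma->addr maps the region starting at dma + 1:
 *
 *	page: [virtnet_rq_dma][buf 0][buf 1]...[unused]
 *
 * which is why virtnet_rq_init_one_sg() recovers a buffer's device address as
 * dma->addr - sizeof(*dma) + offset. dma->ref counts the buffers taken from
 * the page; virtnet_rq_unmap() only tears the mapping down once it hits zero.
 */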

static void virtnet_rq_set_premapped(struct virtnet_info *vi)
{
	int i;

	/* disable for big mode */
	if (!vi->mergeable_rx_bufs && vi->big_packets)
		return;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (virtqueue_set_dma_premapped(vi->rq[i].vq))
			continue;

		vi->rq[i].do_dma = true;
	}
}

static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct receive_queue *rq;
	int i = vq2rxq(vq);

	rq = &vi->rq[i];

	if (rq->do_dma)
		virtnet_rq_unmap(rq, buf, 0);

	virtnet_rq_free_buf(vi, rq, buf);
}

static void free_old_xmit(struct send_queue *sq, bool in_napi)
{
	struct virtnet_sq_free_stats stats = {0};

	__free_old_xmit(sq, in_napi, &stats);

	/* Avoid the overhead when no packets have been processed, which
	 * happens when called speculatively from start_xmit.
	 */
	if (!stats.packets)
		return;

	u64_stats_update_begin(&sq->stats.syncp);
	u64_stats_add(&sq->stats.bytes, stats.bytes);
	u64_stats_add(&sq->stats.packets, stats.packets);
	u64_stats_update_end(&sq->stats.syncp);
}

static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
		return false;
	else if (q < vi->curr_queue_pairs)
		return true;
	else
		return false;
}

static void check_sq_full_and_disable(struct virtnet_info *vi,
				      struct net_device *dev,
				      struct send_queue *sq)
{
	bool use_napi = sq->napi.weight;
	int qnum;

	qnum = sq - vi->sq;

	/* If running out of space, stop queue to avoid getting packets that we
	 * are then unable to transmit.
	 * An alternative would be to force queuing layer to requeue the skb by
	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
	 * returned in a normal path of operation: it means that driver is not
	 * maintaining the TX queue stop/start state properly, and causes
	 * the stack to do a non-trivial amount of useless work.
	 * Since most packets only take 1 or 2 ring slots, stopping the queue
	 * early means 16 slots are typically wasted.
	 */
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (use_napi) {
			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
				virtqueue_napi_schedule(&sq->napi, sq->vq);
		} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit(sq, false);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}
}
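
/* 2 + MAX_SKB_FRAGS is the worst case a single skb can consume when every
 * fragment lands in its own descriptor: one slot for the virtio header, one
 * for the linear part and MAX_SKB_FRAGS for the page fragments - matching
 * the sg[MAX_SKB_FRAGS + 2] arrays in the send/receive queue structs above.
 */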

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				  struct send_queue *sq,
				  struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	struct skb_shared_info *shinfo;
	u8 nr_frags = 0;
	int err, i;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		shinfo = xdp_get_shared_info_from_frame(xdpf);
		nr_frags = shinfo->nr_frags;
	}

	/* In the wrapping function virtnet_xdp_xmit(), we need to free
	 * up the pending old buffers, where we need to calculate the
	 * position of skb_shared_info in xdp_get_frame_len() and
	 * xdp_return_frame(), which will involve xdpf->data and
	 * xdpf->headroom. Therefore, we need to update the value of
	 * headroom synchronously here.
	 */
	xdpf->headroom -= vi->hdr_len;
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len += vi->hdr_len;

	sg_init_table(sq->sg, nr_frags + 1);
	sg_set_buf(sq->sg, xdpf->data, xdpf->len);
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &shinfo->frags[i];

		sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
	}

	err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
				   xdp_to_ptr(xdpf), GFP_ATOMIC);
	if (unlikely(err))
		return -ENOSPC; /* Caller handles free/refcnt */

	return 0;
}

/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
 * the current cpu, so it does not need to be locked.
 *
 * Here we use a macro instead of inline functions because we have to deal with
 * three issues at the same time: 1. the choice of sq. 2. judge and execute the
 * lock/unlock of txq 3. make sparse happy. It is difficult for two inline
 * functions to perfectly solve these three problems at the same time.
 */
#define virtnet_xdp_get_sq(vi) ({                                       \
	int cpu = smp_processor_id();                                   \
	struct netdev_queue *txq;                                       \
	typeof(vi) v = (vi);                                            \
	unsigned int qp;                                                \
									\
	if (v->curr_queue_pairs > nr_cpu_ids) {                         \
		qp = v->curr_queue_pairs - v->xdp_queue_pairs;          \
		qp += cpu;                                              \
		txq = netdev_get_tx_queue(v->dev, qp);                  \
		__netif_tx_acquire(txq);                                \
	} else {                                                        \
		qp = cpu % v->curr_queue_pairs;                         \
		txq = netdev_get_tx_queue(v->dev, qp);                  \
		__netif_tx_lock(txq, cpu);                              \
	}                                                               \
	v->sq + qp;                                                     \
})

#define virtnet_xdp_put_sq(vi, q) {                                     \
	struct netdev_queue *txq;                                       \
	typeof(vi) v = (vi);                                            \
									\
	txq = netdev_get_tx_queue(v->dev, (q) - v->sq);                 \
	if (v->curr_queue_pairs > nr_cpu_ids)                           \
		__netif_tx_release(txq);                                \
	else                                                            \
		__netif_tx_unlock(txq);                                 \
}
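
/* Typical use of the pair above, as in virtnet_xdp_xmit() below:
 *
 *	sq = virtnet_xdp_get_sq(vi);
 *	... queue frames on sq ...
 *	virtnet_xdp_put_sq(vi, sq);
 *
 * When there are more queue pairs than CPUs each CPU owns a dedicated XDP sq,
 * so __netif_tx_acquire() merely marks the queue held for sparse and no real
 * lock is taken; otherwise the real txq lock is acquired and released.
 */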

static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames, u32 flags)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtnet_sq_free_stats stats = {0};
	struct receive_queue *rq = vi->rq;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
	int nxmit = 0;
	int kicks = 0;
	int ret;
	int i;

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicates XDP resources have been successfully allocated.
	 */
	xdp_prog = rcu_access_pointer(rq->xdp_prog);
	if (!xdp_prog)
		return -ENXIO;

	sq = virtnet_xdp_get_sq(vi);

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
		ret = -EINVAL;
		goto out;
	}

	/* Free up any pending old buffers before queueing new ones. */
	__free_old_xmit(sq, false, &stats);

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
			break;
		nxmit++;
	}
	ret = nxmit;

	if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
		check_sq_full_and_disable(vi, dev, sq);

	if (flags & XDP_XMIT_FLUSH) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
			kicks = 1;
	}
out:
	u64_stats_update_begin(&sq->stats.syncp);
	u64_stats_add(&sq->stats.bytes, stats.bytes);
	u64_stats_add(&sq->stats.packets, stats.packets);
	u64_stats_add(&sq->stats.xdp_tx, n);
	u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
	u64_stats_add(&sq->stats.kicks, kicks);
	u64_stats_update_end(&sq->stats.syncp);

	virtnet_xdp_put_sq(vi, sq);
	return ret;
}
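
/* Note the kick above is batched: the device is notified at most once per
 * ndo_xdp_xmit() call, and only when the caller sets XDP_XMIT_FLUSH, rather
 * than once per frame. A virtqueue notification typically traps to the
 * hypervisor, so this amortizes the most expensive step across the batch.
 */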

static void put_xdp_frags(struct xdp_buff *xdp)
{
	struct skb_shared_info *shinfo;
	struct page *xdp_page;
	int i;

	if (xdp_buff_has_frags(xdp)) {
		shinfo = xdp_get_shared_info_from_buff(xdp);
		for (i = 0; i < shinfo->nr_frags; i++) {
			xdp_page = skb_frag_page(&shinfo->frags[i]);
			put_page(xdp_page);
		}
	}
}

static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
			       struct net_device *dev,
			       unsigned int *xdp_xmit,
			       struct virtnet_rq_stats *stats)
{
	struct xdp_frame *xdpf;
	int err;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	u64_stats_inc(&stats->xdp_packets);

	switch (act) {
	case XDP_PASS:
		return act;

	case XDP_TX:
		u64_stats_inc(&stats->xdp_tx);
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			netdev_dbg(dev, "convert buff to frame failed for xdp\n");
			return XDP_DROP;
		}

		err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
		if (unlikely(!err)) {
			xdp_return_frame_rx_napi(xdpf);
		} else if (unlikely(err < 0)) {
			trace_xdp_exception(dev, xdp_prog, act);
			return XDP_DROP;
		}
		*xdp_xmit |= VIRTIO_XDP_TX;
		return act;

	case XDP_REDIRECT:
		u64_stats_inc(&stats->xdp_redirects);
		err = xdp_do_redirect(dev, xdp, xdp_prog);
		if (err)
			return XDP_DROP;

		*xdp_xmit |= VIRTIO_XDP_REDIR;
		return act;

	default:
		bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		return XDP_DROP;
	}
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
}

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that
 * we hit right after XDP is enabled and until the queue is refilled
 * with large buffers with sufficient headroom - so it should affect
 * at most queue size packets.
 * Afterwards, the conditions to enable
 * XDP should preclude the underlying device from sending packets
 * across multiple buffers (num_buf > 1), and we make sure buffers
 * have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       int *num_buf,
				       struct page *p,
				       int offset,
				       int page_off,
				       unsigned int *len)
{
	int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct page *page;

	if (page_off + *len + tailroom > PAGE_SIZE)
		return NULL;

	page = alloc_page(GFP_ATOMIC);
	if (!page)
		return NULL;

	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--*num_buf) {
		unsigned int buflen;
		void *buf;
		int off;

		buf = virtnet_rq_get_buf(rq, &buflen, NULL);
		if (unlikely(!buf))
			goto err_buf;

		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packets larger than the MTU.
		 */
		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
			put_page(p);
			goto err_buf;
		}

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	/* Headroom does not contribute to packet length */
	*len = page_off - VIRTIO_XDP_HEADROOM;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}

static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
					       unsigned int xdp_headroom,
					       void *buf,
					       unsigned int len)
{
	unsigned int header_offset;
	unsigned int headroom;
	unsigned int buflen;
	struct sk_buff *skb;

	header_offset = VIRTNET_RX_PAD + xdp_headroom;
	headroom = vi->hdr_len + header_offset;
	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
		 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	skb = virtnet_build_skb(buf, buflen, headroom, len);
	if (unlikely(!skb))
		return NULL;

	buf += header_offset;
	memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);

	return skb;
}

static struct sk_buff *receive_small_xdp(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 struct bpf_prog *xdp_prog,
					 void *buf,
					 unsigned int xdp_headroom,
					 unsigned int len,
					 unsigned int *xdp_xmit,
					 struct virtnet_rq_stats *stats)
{
	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
	unsigned int headroom = vi->hdr_len + header_offset;
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
	struct page *page = virt_to_head_page(buf);
	struct page *xdp_page;
	unsigned int buflen;
	struct xdp_buff xdp;
	struct sk_buff *skb;
	unsigned int metasize = 0;
	u32 act;

	if (unlikely(hdr->hdr.gso_type))
		goto err_xdp;

	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
		 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
		int offset = buf - page_address(page) + header_offset;
		unsigned int tlen = len + vi->hdr_len;
		int num_buf = 1;

		xdp_headroom = virtnet_get_headroom(vi);
		header_offset = VIRTNET_RX_PAD + xdp_headroom;
		headroom = vi->hdr_len + header_offset;
		buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		xdp_page = xdp_linearize_page(rq, &num_buf, page,
					      offset, header_offset,
					      &tlen);
		if (!xdp_page)
			goto err_xdp;

		buf = page_address(xdp_page);
		put_page(page);
		page = xdp_page;
	}

	xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
	xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
			 xdp_headroom, len, true);

	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);

	switch (act) {
	case XDP_PASS:
		/* Recalculate length in case bpf program changed it */
		len = xdp.data_end - xdp.data;
		metasize = xdp.data - xdp.data_meta;
		break;

	case XDP_TX:
	case XDP_REDIRECT:
		goto xdp_xmit;

	default:
		goto err_xdp;
	}

	skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
	if (unlikely(!skb))
		goto err;

	if (metasize)
		skb_metadata_set(skb, metasize);

	return skb;

err_xdp:
	u64_stats_inc(&stats->xdp_drops);
err:
	u64_stats_inc(&stats->drops);
	put_page(page);
xdp_xmit:
	return NULL;
}

static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, void *ctx,
				     unsigned int len,
				     unsigned int *xdp_xmit,
				     struct virtnet_rq_stats *stats)
{
	unsigned int xdp_headroom = (unsigned long)ctx;
	struct page *page = virt_to_head_page(buf);
	struct sk_buff *skb;

	len -= vi->hdr_len;
	u64_stats_add(&stats->bytes, len);

	if (unlikely(len > GOOD_PACKET_LEN)) {
		pr_debug("%s: rx error: len %u exceeds max size %d\n",
			 dev->name, len, GOOD_PACKET_LEN);
		DEV_STATS_INC(dev, rx_length_errors);
		goto err;
	}

	if (unlikely(vi->xdp_enabled)) {
		struct bpf_prog *xdp_prog;

		rcu_read_lock();
		xdp_prog = rcu_dereference(rq->xdp_prog);
		if (xdp_prog) {
			skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
						xdp_headroom, len, xdp_xmit,
						stats);
			rcu_read_unlock();
			return skb;
		}
		rcu_read_unlock();
	}

	skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
	if (likely(skb))
		return skb;

err:
	u64_stats_inc(&stats->drops);
	put_page(page);
	return NULL;
}
static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len,
				   struct virtnet_rq_stats *stats)
{
	struct page *page = buf;
	struct sk_buff *skb =
		page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);

	u64_stats_add(&stats->bytes, len - vi->hdr_len);
	if (unlikely(!skb))
		goto err;

	return skb;

err:
	u64_stats_inc(&stats->drops);
	give_pages(rq, page);
	return NULL;
}

static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
			       struct net_device *dev,
			       struct virtnet_rq_stats *stats)
{
	struct page *page;
	void *buf;
	int len;

	while (num_buf-- > 1) {
		buf = virtnet_rq_get_buf(rq, &len, NULL);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			DEV_STATS_INC(dev, rx_length_errors);
			break;
		}
		u64_stats_add(&stats->bytes, len);
		page = virt_to_head_page(buf);
		put_page(page);
	}
}

/* Why not use xdp_build_skb_from_frame()?
 * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
 * virtio-net there are 2 points that do not match its requirements:
 * 1. The size of the prefilled buffer is not fixed before xdp is set.
 * 2. xdp_build_skb_from_frame() does more checks than we need,
 *    like eth_type_trans() (which virtio-net does in receive_buf()).
 */
static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
					       struct virtnet_info *vi,
					       struct xdp_buff *xdp,
					       unsigned int xdp_frags_truesz)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	unsigned int headroom, data_len;
	struct sk_buff *skb;
	int metasize;
	u8 nr_frags;

	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
		pr_debug("Error building skb as missing reserved tailroom for xdp");
		return NULL;
	}

	if (unlikely(xdp_buff_has_frags(xdp)))
		nr_frags = sinfo->nr_frags;

	skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
	if (unlikely(!skb))
		return NULL;

	headroom = xdp->data - xdp->data_hard_start;
	data_len = xdp->data_end - xdp->data;
	skb_reserve(skb, headroom);
	__skb_put(skb, data_len);

	metasize = xdp->data - xdp->data_meta;
	metasize = metasize > 0 ? metasize : 0;
	if (metasize)
		skb_metadata_set(skb, metasize);

	if (unlikely(xdp_buff_has_frags(xdp)))
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size,
					   xdp_frags_truesz,
					   xdp_buff_is_frag_pfmemalloc(xdp));

	return skb;
}
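
/* The truesize propagated here is the sum of the original receive buffer
 * truesizes collected by virtnet_build_xdp_buff_mrg() below, not the sum of
 * the frag lengths: skb->truesize must reflect what the buffers actually
 * cost, so socket memory accounting stays honest even when frags are only
 * partially filled.
 */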

/* TODO: build xdp in big mode */
static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
				      struct virtnet_info *vi,
				      struct receive_queue *rq,
				      struct xdp_buff *xdp,
				      void *buf,
				      unsigned int len,
				      unsigned int frame_sz,
				      int *num_buf,
				      unsigned int *xdp_frags_truesize,
				      struct virtnet_rq_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	unsigned int headroom, tailroom, room;
	unsigned int truesize, cur_frag_size;
	struct skb_shared_info *shinfo;
	unsigned int xdp_frags_truesz = 0;
	struct page *page;
	skb_frag_t *frag;
	int offset;
	void *ctx;

	xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
	xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
			 VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);

	if (!*num_buf)
		return 0;

	if (*num_buf > 1) {
		/* If we want to build multi-buffer xdp, we need
		 * to specify that the flags of xdp_buff have the
		 * XDP_FLAGS_HAS_FRAG bit.
		 */
		if (!xdp_buff_has_frags(xdp))
			xdp_buff_set_frags_flag(xdp);

		shinfo = xdp_get_shared_info_from_buff(xdp);
		shinfo->nr_frags = 0;
		shinfo->xdp_frags_size = 0;
	}

	if (*num_buf > MAX_SKB_FRAGS + 1)
		return -EINVAL;

	while (--*num_buf > 0) {
		buf = virtnet_rq_get_buf(rq, &len, &ctx);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, *num_buf,
				 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
			DEV_STATS_INC(dev, rx_length_errors);
			goto err;
		}

		u64_stats_add(&stats->bytes, len);
		page = virt_to_head_page(buf);
		offset = buf - page_address(page);

		truesize = mergeable_ctx_to_truesize(ctx);
		headroom = mergeable_ctx_to_headroom(ctx);
		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
		room = SKB_DATA_ALIGN(headroom + tailroom);

		cur_frag_size = truesize;
		xdp_frags_truesz += cur_frag_size;
		if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
			put_page(page);
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)(truesize - room));
			DEV_STATS_INC(dev, rx_length_errors);
			goto err;
		}

		frag = &shinfo->frags[shinfo->nr_frags++];
		skb_frag_fill_page_desc(frag, page, offset, len);
		if (page_is_pfmemalloc(page))
			xdp_buff_set_frag_pfmemalloc(xdp);

		shinfo->xdp_frags_size += len;
	}

	*xdp_frags_truesize = xdp_frags_truesz;
	return 0;

err:
	put_xdp_frags(xdp);
	return -EINVAL;
}
ad4858be XZ |
1522 | static void *mergeable_xdp_get_buf(struct virtnet_info *vi, |
1523 | struct receive_queue *rq, | |
1524 | struct bpf_prog *xdp_prog, | |
1525 | void *ctx, | |
1526 | unsigned int *frame_sz, | |
1527 | int *num_buf, | |
1528 | struct page **page, | |
1529 | int offset, | |
1530 | unsigned int *len, | |
1531 | struct virtio_net_hdr_mrg_rxbuf *hdr) | |
1532 | { | |
1533 | unsigned int truesize = mergeable_ctx_to_truesize(ctx); | |
1534 | unsigned int headroom = mergeable_ctx_to_headroom(ctx); | |
1535 | struct page *xdp_page; | |
1536 | unsigned int xdp_room; | |
1537 | ||
1538 | /* Transient failure which in theory could occur if | |
1539 | * in-flight packets from before XDP was enabled reach | |
1540 | * the receive path after XDP is loaded. | |
1541 | */ | |
1542 | if (unlikely(hdr->hdr.gso_type)) | |
1543 | return NULL; | |
1544 | ||
1545 | /* The XDP core assumes the frag size is PAGE_SIZE, but buffers | |
1546 | * with headroom may add a hole to the truesize, which | |
1547 | * makes their length exceed PAGE_SIZE. So we disable the | |
1548 | * hole mechanism for xdp. See add_recvbuf_mergeable(). | |
1549 | */ | |
1550 | *frame_sz = truesize; | |
1551 | ||
dbe4fec2 XZ |
1552 | if (likely(headroom >= virtnet_get_headroom(vi) && |
1553 | (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) { | |
1554 | return page_address(*page) + offset; | |
1555 | } | |
1556 | ||
ad4858be XZ |
1557 | /* This happens when the headroom is not enough because | |
1558 | * the buffer was prefilled before XDP was set. | |
1559 | * This should only happen for the first several packets. | |
1560 | * In fact, vq reset can be used here to help us clean up | |
1561 | * the prefilled buffers, but many existing devices do not | |
1562 | * support it, and we don't want to bother users who are | |
1563 | * using xdp normally. | |
1564 | */ | |
dbe4fec2 | 1565 | if (!xdp_prog->aux->xdp_has_frags) { |
ad4858be XZ |
1566 | /* linearize data for XDP */ |
1567 | xdp_page = xdp_linearize_page(rq, num_buf, | |
1568 | *page, offset, | |
1569 | VIRTIO_XDP_HEADROOM, | |
1570 | len); | |
ad4858be XZ |
1571 | if (!xdp_page) |
1572 | return NULL; | |
dbe4fec2 | 1573 | } else { |
ad4858be XZ |
1574 | xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM + |
1575 | sizeof(struct skb_shared_info)); | |
1576 | if (*len + xdp_room > PAGE_SIZE) | |
1577 | return NULL; | |
1578 | ||
1579 | xdp_page = alloc_page(GFP_ATOMIC); | |
1580 | if (!xdp_page) | |
1581 | return NULL; | |
1582 | ||
1583 | memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM, | |
1584 | page_address(*page) + offset, *len); | |
ad4858be XZ |
1585 | } |
1586 | ||
dbe4fec2 XZ |
1587 | *frame_sz = PAGE_SIZE; |
1588 | ||
1589 | put_page(*page); | |
1590 | ||
1591 | *page = xdp_page; | |
1592 | ||
1593 | return page_address(*page) + VIRTIO_XDP_HEADROOM; | |
ad4858be XZ |
1594 | } |
1595 | ||
d8f2835a XZ |
1596 | static struct sk_buff *receive_mergeable_xdp(struct net_device *dev, |
1597 | struct virtnet_info *vi, | |
1598 | struct receive_queue *rq, | |
1599 | struct bpf_prog *xdp_prog, | |
1600 | void *buf, | |
1601 | void *ctx, | |
1602 | unsigned int len, | |
1603 | unsigned int *xdp_xmit, | |
1604 | struct virtnet_rq_stats *stats) | |
1605 | { | |
1606 | struct virtio_net_hdr_mrg_rxbuf *hdr = buf; | |
1607 | int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); | |
1608 | struct page *page = virt_to_head_page(buf); | |
1609 | int offset = buf - page_address(page); | |
1610 | unsigned int xdp_frags_truesz = 0; | |
1611 | struct sk_buff *head_skb; | |
1612 | unsigned int frame_sz; | |
1613 | struct xdp_buff xdp; | |
1614 | void *data; | |
1615 | u32 act; | |
1616 | int err; | |
1617 | ||
1618 | data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page, | |
1619 | offset, &len, hdr); | |
1620 | if (unlikely(!data)) | |
1621 | goto err_xdp; | |
1622 | ||
1623 | err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz, | |
1624 | &num_buf, &xdp_frags_truesz, stats); | |
1625 | if (unlikely(err)) | |
1626 | goto err_xdp; | |
1627 | ||
1628 | act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats); | |
1629 | ||
1630 | switch (act) { | |
1631 | case XDP_PASS: | |
1632 | head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz); | |
1633 | if (unlikely(!head_skb)) | |
1634 | break; | |
1635 | return head_skb; | |
1636 | ||
1637 | case XDP_TX: | |
1638 | case XDP_REDIRECT: | |
1639 | return NULL; | |
1640 | ||
1641 | default: | |
1642 | break; | |
1643 | } | |
1644 | ||
1645 | put_xdp_frags(&xdp); | |
1646 | ||
1647 | err_xdp: | |
1648 | put_page(page); | |
1649 | mergeable_buf_free(rq, num_buf, dev, stats); | |
1650 | ||
61217d8f ED |
1651 | u64_stats_inc(&stats->xdp_drops); |
1652 | u64_stats_inc(&stats->drops); | |
d8f2835a XZ |
1653 | return NULL; |
1654 | } | |
1655 | ||
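/* A note on the verdict handling above: for XDP_TX and XDP_REDIRECT,
 * virtnet_xdp_handler() has already taken ownership of the pages
 * (transmitting or redirecting them), so NULL is returned without building
 * an skb; only XDP_PASS converts the xdp_buff into an skb. Any other
 * verdict falls through to the drop path, which releases the frags and the
 * remaining buffers of the packet.
 */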
8fc3b9e9 | 1656 | static struct sk_buff *receive_mergeable(struct net_device *dev, |
fdd819b2 | 1657 | struct virtnet_info *vi, |
8fc3b9e9 | 1658 | struct receive_queue *rq, |
680557cf MT |
1659 | void *buf, |
1660 | void *ctx, | |
186b3c99 | 1661 | unsigned int len, |
7d9d60fd | 1662 | unsigned int *xdp_xmit, |
d46eeeaf | 1663 | struct virtnet_rq_stats *stats) |
9ab86bbc | 1664 | { |
012873d0 | 1665 | struct virtio_net_hdr_mrg_rxbuf *hdr = buf; |
981f14d4 | 1666 | int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); |
8fc3b9e9 MT |
1667 | struct page *page = virt_to_head_page(buf); |
1668 | int offset = buf - page_address(page); | |
f600b690 | 1669 | struct sk_buff *head_skb, *curr_skb; |
9ce6146e | 1670 | unsigned int truesize = mergeable_ctx_to_truesize(ctx); |
4941d472 | 1671 | unsigned int headroom = mergeable_ctx_to_headroom(ctx); |
ef75cb51 HQ |
1672 | unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; |
1673 | unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); | |
f600b690 | 1674 | |
56434a01 | 1675 | head_skb = NULL; |
61217d8f | 1676 | u64_stats_add(&stats->bytes, len - vi->hdr_len); |
56434a01 | 1677 | |
ef75cb51 | 1678 | if (unlikely(len > truesize - room)) { |
ad993a95 | 1679 | pr_debug("%s: rx error: len %u exceeds truesize %lu\n", |
ef75cb51 | 1680 | dev->name, len, (unsigned long)(truesize - room)); |
d12a26b7 | 1681 | DEV_STATS_INC(dev, rx_length_errors); |
ad993a95 XY |
1682 | goto err_skb; |
1683 | } | |
6213f07c | 1684 | |
59ba3b1a XZ |
1685 | if (unlikely(vi->xdp_enabled)) { |
1686 | struct bpf_prog *xdp_prog; | |
6213f07c | 1687 | |
59ba3b1a XZ |
1688 | rcu_read_lock(); |
1689 | xdp_prog = rcu_dereference(rq->xdp_prog); | |
1690 | if (xdp_prog) { | |
1691 | head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx, | |
1692 | len, xdp_xmit, stats); | |
1693 | rcu_read_unlock(); | |
1694 | return head_skb; | |
1695 | } | |
d8f2835a | 1696 | rcu_read_unlock(); |
f600b690 | 1697 | } |
ab7db917 | 1698 | |
fa0f1ba7 | 1699 | head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom); |
f600b690 | 1700 | curr_skb = head_skb; |
9ab86bbc | 1701 | |
8fc3b9e9 MT |
1702 | if (unlikely(!curr_skb)) |
1703 | goto err_skb; | |
9ab86bbc | 1704 | while (--num_buf) { |
8fc3b9e9 MT |
1705 | int num_skb_frags; |
1706 | ||
295525e2 | 1707 | buf = virtnet_rq_get_buf(rq, &len, &ctx); |
03e9f8a0 | 1708 | if (unlikely(!buf)) { |
8fc3b9e9 | 1709 | pr_debug("%s: rx error: %d buffers out of %d missing\n", |
fdd819b2 | 1710 | dev->name, num_buf, |
012873d0 MT |
1711 | virtio16_to_cpu(vi->vdev, |
1712 | hdr->num_buffers)); | |
d12a26b7 | 1713 | DEV_STATS_INC(dev, rx_length_errors); |
8fc3b9e9 | 1714 | goto err_buf; |
3f2c31d9 | 1715 | } |
8fc3b9e9 | 1716 | |
61217d8f | 1717 | u64_stats_add(&stats->bytes, len); |
8fc3b9e9 | 1718 | page = virt_to_head_page(buf); |
28b39bc7 JW |
1719 | |
1720 | truesize = mergeable_ctx_to_truesize(ctx); | |
ef75cb51 HQ |
1721 | headroom = mergeable_ctx_to_headroom(ctx); |
1722 | tailroom = headroom ? sizeof(struct skb_shared_info) : 0; | |
1723 | room = SKB_DATA_ALIGN(headroom + tailroom); | |
1724 | if (unlikely(len > truesize - room)) { | |
56da5fd0 | 1725 | pr_debug("%s: rx error: len %u exceeds truesize %lu\n", |
ef75cb51 | 1726 | dev->name, len, (unsigned long)(truesize - room)); |
d12a26b7 | 1727 | DEV_STATS_INC(dev, rx_length_errors); |
680557cf MT |
1728 | goto err_skb; |
1729 | } | |
8fc3b9e9 MT |
1730 | |
1731 | num_skb_frags = skb_shinfo(curr_skb)->nr_frags; | |
2613af0e MD |
1732 | if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { |
1733 | struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); | |
8fc3b9e9 MT |
1734 | |
1735 | if (unlikely(!nskb)) | |
1736 | goto err_skb; | |
2613af0e MD |
1737 | if (curr_skb == head_skb) |
1738 | skb_shinfo(curr_skb)->frag_list = nskb; | |
1739 | else | |
1740 | curr_skb->next = nskb; | |
1741 | curr_skb = nskb; | |
1742 | head_skb->truesize += nskb->truesize; | |
1743 | num_skb_frags = 0; | |
1744 | } | |
1745 | if (curr_skb != head_skb) { | |
1746 | head_skb->data_len += len; | |
1747 | head_skb->len += len; | |
fb51879d | 1748 | head_skb->truesize += truesize; |
2613af0e | 1749 | } |
8fc3b9e9 | 1750 | offset = buf - page_address(page); |
ba275241 JW |
1751 | if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { |
1752 | put_page(page); | |
1753 | skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, | |
fb51879d | 1754 | len, truesize); |
ba275241 JW |
1755 | } else { |
1756 | skb_add_rx_frag(curr_skb, num_skb_frags, page, | |
fb51879d | 1757 | offset, len, truesize); |
ba275241 | 1758 | } |
8fc3b9e9 MT |
1759 | } |
1760 | ||
5377d758 | 1761 | ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); |
8fc3b9e9 MT |
1762 | return head_skb; |
1763 | ||
1764 | err_skb: | |
1765 | put_page(page); | |
80f50f91 XZ |
1766 | mergeable_buf_free(rq, num_buf, dev, stats); |
1767 | ||
8fc3b9e9 | 1768 | err_buf: |
61217d8f | 1769 | u64_stats_inc(&stats->drops); |
8fc3b9e9 MT |
1770 | dev_kfree_skb(head_skb); |
1771 | return NULL; | |
9ab86bbc SM |
1772 | } |
1773 | ||
91f41f01 AM |
1774 | static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash, |
1775 | struct sk_buff *skb) | |
1776 | { | |
1777 | enum pkt_hash_types rss_hash_type; | |
1778 | ||
1779 | if (!hdr_hash || !skb) | |
1780 | return; | |
1781 | ||
95bb6330 | 1782 | switch (__le16_to_cpu(hdr_hash->hash_report)) { |
91f41f01 AM |
1783 | case VIRTIO_NET_HASH_REPORT_TCPv4: |
1784 | case VIRTIO_NET_HASH_REPORT_UDPv4: | |
1785 | case VIRTIO_NET_HASH_REPORT_TCPv6: | |
1786 | case VIRTIO_NET_HASH_REPORT_UDPv6: | |
1787 | case VIRTIO_NET_HASH_REPORT_TCPv6_EX: | |
1788 | case VIRTIO_NET_HASH_REPORT_UDPv6_EX: | |
1789 | rss_hash_type = PKT_HASH_TYPE_L4; | |
1790 | break; | |
1791 | case VIRTIO_NET_HASH_REPORT_IPv4: | |
1792 | case VIRTIO_NET_HASH_REPORT_IPv6: | |
1793 | case VIRTIO_NET_HASH_REPORT_IPv6_EX: | |
1794 | rss_hash_type = PKT_HASH_TYPE_L3; | |
1795 | break; | |
1796 | case VIRTIO_NET_HASH_REPORT_NONE: | |
1797 | default: | |
1798 | rss_hash_type = PKT_HASH_TYPE_NONE; | |
1799 | } | |
95bb6330 | 1800 | skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type); |
91f41f01 AM |
1801 | } |
1802 | ||
7d9d60fd TM |
1803 | static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, |
1804 | void *buf, unsigned int len, void **ctx, | |
a0929a44 | 1805 | unsigned int *xdp_xmit, |
d46eeeaf | 1806 | struct virtnet_rq_stats *stats) |
9ab86bbc | 1807 | { |
e9d7417b | 1808 | struct net_device *dev = vi->dev; |
9ab86bbc | 1809 | struct sk_buff *skb; |
dae64749 | 1810 | struct virtio_net_common_hdr *hdr; |
3f2c31d9 | 1811 | |
bcff3162 | 1812 | if (unlikely(len < vi->hdr_len + ETH_HLEN)) { |
9ab86bbc | 1813 | pr_debug("%s: short packet %i\n", dev->name, len); |
d12a26b7 | 1814 | DEV_STATS_INC(dev, rx_length_errors); |
2311e06b | 1815 | virtnet_rq_free_buf(vi, rq, buf); |
7d9d60fd | 1816 | return; |
9ab86bbc | 1817 | } |
3f2c31d9 | 1818 | |
f121159d | 1819 | if (vi->mergeable_rx_bufs) |
7d9d60fd | 1820 | skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit, |
a0929a44 | 1821 | stats); |
f121159d | 1822 | else if (vi->big_packets) |
a0929a44 | 1823 | skb = receive_big(dev, vi, rq, buf, len, stats); |
f121159d | 1824 | else |
a0929a44 | 1825 | skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats); |
f121159d MT |
1826 | |
1827 | if (unlikely(!skb)) | |
7d9d60fd | 1828 | return; |
3f2c31d9 | 1829 | |
dae64749 | 1830 | hdr = skb_vnet_common_hdr(skb); |
91f41f01 | 1831 | if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) |
dae64749 | 1832 | virtio_skb_set_hash(&hdr->hash_v1_hdr, skb); |
3fa2a1df | 1833 | |
e858fae2 | 1834 | if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) |
10a8d94a | 1835 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
296f96fc | 1836 | |
e858fae2 MR |
1837 | if (virtio_net_hdr_to_skb(skb, &hdr->hdr, |
1838 | virtio_is_little_endian(vi->vdev))) { | |
1839 | net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n", | |
1840 | dev->name, hdr->hdr.gso_type, | |
1841 | hdr->hdr.gso_size); | |
1842 | goto frame_err; | |
296f96fc RR |
1843 | } |
1844 | ||
133bbb18 | 1845 | skb_record_rx_queue(skb, vq2rxq(rq->vq)); |
d1dc06dc MR |
1846 | skb->protocol = eth_type_trans(skb, dev); |
1847 | pr_debug("Receiving skb proto 0x%04x len %i type %i\n", | |
1848 | ntohs(skb->protocol), skb->len, skb->pkt_type); | |
1849 | ||
0fbd050a | 1850 | napi_gro_receive(&rq->napi, skb); |
7d9d60fd | 1851 | return; |
296f96fc RR |
1852 | |
1853 | frame_err: | |
d12a26b7 | 1854 | DEV_STATS_INC(dev, rx_frame_errors); |
296f96fc RR |
1855 | dev_kfree_skb(skb); |
1856 | } | |
1857 | ||
192f68cf JW |
1858 | /* Unlike mergeable buffers, all buffers are allocated with the | |
1859 | * same size, except for the headroom. For this reason we do | |
1860 | * not need to use mergeable_len_to_ctx here - it is enough | |
1861 | * to store the headroom as the context, ignoring the truesize. | |
1862 | */ | |
946fa564 MT |
1863 | static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, |
1864 | gfp_t gfp) | |
296f96fc | 1865 | { |
f6b10209 | 1866 | char *buf; |
2de2f7f4 | 1867 | unsigned int xdp_headroom = virtnet_get_headroom(vi); |
192f68cf | 1868 | void *ctx = (void *)(unsigned long)xdp_headroom; |
f6b10209 | 1869 | int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; |
9ab86bbc | 1870 | int err; |
3f2c31d9 | 1871 | |
f6b10209 JW |
1872 | len = SKB_DATA_ALIGN(len) + |
1873 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | |
295525e2 XZ |
1874 | |
1875 | buf = virtnet_rq_alloc(rq, len, gfp); | |
1876 | if (unlikely(!buf)) | |
9ab86bbc | 1877 | return -ENOMEM; |
296f96fc | 1878 | |
295525e2 XZ |
1879 | virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom, |
1880 | vi->hdr_len + GOOD_PACKET_LEN); | |
1881 | ||
192f68cf | 1882 | err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); |
295525e2 XZ |
1883 | if (err < 0) { |
1884 | if (rq->do_dma) | |
1885 | virtnet_rq_unmap(rq, buf, 0); | |
f6b10209 | 1886 | put_page(virt_to_head_page(buf)); |
295525e2 XZ |
1887 | } |
1888 | ||
9ab86bbc SM |
1889 | return err; |
1890 | } | |
97402b96 | 1891 | |
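/* Illustrative layout of the small-mode receive buffer assembled above
 * (a sketch drawn from the code, not verbatim from this file):
 *
 *   buf
 *   +----------------+--------------+----------+-----------------+
 *   | VIRTNET_RX_PAD | xdp_headroom | vnet hdr | GOOD_PACKET_LEN |
 *   +----------------+--------------+----------+-----------------+
 *                                   ^ the rq->sg entry starts here
 *
 * followed by SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) of tailroom,
 * so the buffer can later back either an skb or an xdp_buff.
 */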
012873d0 MT |
1892 | static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, |
1893 | gfp_t gfp) | |
9ab86bbc | 1894 | { |
9ab86bbc SM |
1895 | struct page *first, *list = NULL; |
1896 | char *p; | |
1897 | int i, err, offset; | |
1898 | ||
4959aebb | 1899 | sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2); |
a5835440 | 1900 | |
4959aebb GL |
1901 | /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */ |
1902 | for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) { | |
e9d7417b | 1903 | first = get_a_page(rq, gfp); |
9ab86bbc SM |
1904 | if (!first) { |
1905 | if (list) | |
e9d7417b | 1906 | give_pages(rq, list); |
9ab86bbc | 1907 | return -ENOMEM; |
97402b96 | 1908 | } |
e9d7417b | 1909 | sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); |
97402b96 | 1910 | |
9ab86bbc SM |
1911 | /* chain new page in list head to match sg */ |
1912 | first->private = (unsigned long)list; | |
1913 | list = first; | |
1914 | } | |
296f96fc | 1915 | |
e9d7417b | 1916 | first = get_a_page(rq, gfp); |
9ab86bbc | 1917 | if (!first) { |
e9d7417b | 1918 | give_pages(rq, list); |
9ab86bbc SM |
1919 | return -ENOMEM; |
1920 | } | |
1921 | p = page_address(first); | |
1922 | ||
e9d7417b | 1923 | /* rq->sg[0], rq->sg[1] share the same page */ |
012873d0 MT |
1924 | /* a separated rq->sg[0] for header - required in case !any_header_sg */ |
1925 | sg_set_buf(&rq->sg[0], p, vi->hdr_len); | |
9ab86bbc | 1926 | |
e9d7417b | 1927 | /* rq->sg[1] for data packet, from offset */ |
9ab86bbc | 1928 | offset = sizeof(struct padded_vnet_hdr); |
e9d7417b | 1929 | sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); |
9ab86bbc SM |
1930 | |
1931 | /* chain first in list head */ | |
1932 | first->private = (unsigned long)list; | |
4959aebb | 1933 | err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2, |
9dc7b9e4 | 1934 | first, gfp); |
9ab86bbc | 1935 | if (err < 0) |
e9d7417b | 1936 | give_pages(rq, first); |
9ab86bbc SM |
1937 | |
1938 | return err; | |
296f96fc RR |
1939 | } |
1940 | ||
d85b758f | 1941 | static unsigned int get_mergeable_buf_len(struct receive_queue *rq, |
3cc81a9a JW |
1942 | struct ewma_pkt_len *avg_pkt_len, |
1943 | unsigned int room) | |
3f2c31d9 | 1944 | { |
c1ddc42d AM |
1945 | struct virtnet_info *vi = rq->vq->vdev->priv; |
1946 | const size_t hdr_len = vi->hdr_len; | |
fbf28d78 MD |
1947 | unsigned int len; |
1948 | ||
3cc81a9a JW |
1949 | if (room) |
1950 | return PAGE_SIZE - room; | |
1951 | ||
1952 | len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), | |
f0c3192c | 1953 | rq->min_buf_len, PAGE_SIZE - hdr_len); |
3cc81a9a | 1954 | |
e377fcc8 | 1955 | return ALIGN(len, L1_CACHE_BYTES); |
fbf28d78 MD |
1956 | } |
1957 | ||
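/* A worked example of the sizing above, assuming a 12-byte mergeable vnet
 * header, rq->min_buf_len = 128, an EWMA average of ~1500 bytes and
 * 64-byte cache lines:
 *
 *   len = 12 + clamp(1500, 128, PAGE_SIZE - 12) = 1512
 *   ALIGN(1512, L1_CACHE_BYTES)                 = 1536
 *
 * so refilled buffers track the recent average packet size instead of
 * always consuming a full page; when room is nonzero (XDP headroom in
 * use) the function returns PAGE_SIZE - room instead.
 */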
2de2f7f4 JF |
1958 | static int add_recvbuf_mergeable(struct virtnet_info *vi, |
1959 | struct receive_queue *rq, gfp_t gfp) | |
fbf28d78 | 1960 | { |
fb51879d | 1961 | struct page_frag *alloc_frag = &rq->alloc_frag; |
2de2f7f4 | 1962 | unsigned int headroom = virtnet_get_headroom(vi); |
3cc81a9a JW |
1963 | unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; |
1964 | unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); | |
295525e2 | 1965 | unsigned int len, hole; |
680557cf | 1966 | void *ctx; |
295525e2 | 1967 | char *buf; |
3f2c31d9 | 1968 | int err; |
3f2c31d9 | 1969 | |
3cc81a9a JW |
1970 | /* Extra tailroom is needed to satisfy XDP's assumption. This |
1971 | * means rx frag coalescing won't work, but considering we've | |
1972 | * disabled GSO for XDP, it won't be a big issue. | |
1973 | */ | |
1974 | len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); | |
295525e2 XZ |
1975 | |
1976 | buf = virtnet_rq_alloc(rq, len + room, gfp); | |
1977 | if (unlikely(!buf)) | |
9ab86bbc | 1978 | return -ENOMEM; |
ab7db917 | 1979 | |
2de2f7f4 | 1980 | buf += headroom; /* advance address leaving hole at front of pkt */ |
fb51879d | 1981 | hole = alloc_frag->size - alloc_frag->offset; |
3cc81a9a | 1982 | if (hole < len + room) { |
ab7db917 MD |
1983 | /* To avoid internal fragmentation, if there is very likely not |
1984 | * enough space for another buffer, add the remaining space to | |
1daa8790 | 1985 | * the current buffer. |
484beac2 HQ |
1986 | * XDP core assumes that frame_size of xdp_buff and the length |
1987 | * of the frag are PAGE_SIZE, so we disable the hole mechanism. | |
ab7db917 | 1988 | */ |
484beac2 HQ |
1989 | if (!headroom) |
1990 | len += hole; | |
fb51879d MD |
1991 | alloc_frag->offset += hole; |
1992 | } | |
3f2c31d9 | 1993 | |
295525e2 XZ |
1994 | virtnet_rq_init_one_sg(rq, buf, len); |
1995 | ||
ef75cb51 | 1996 | ctx = mergeable_len_to_ctx(len + room, headroom); |
680557cf | 1997 | err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); |
295525e2 XZ |
1998 | if (err < 0) { |
1999 | if (rq->do_dma) | |
2000 | virtnet_rq_unmap(rq, buf, 0); | |
2613af0e | 2001 | put_page(virt_to_head_page(buf)); |
295525e2 | 2002 | } |
3f2c31d9 | 2003 | |
9ab86bbc SM |
2004 | return err; |
2005 | } | |
3f2c31d9 | 2006 | |
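/* A worked example of the hole handling above. If, after carving this
 * buffer out of the page fragment, only 1000 bytes remain while the next
 * buffer would need len + room = 1536, those 1000 bytes cannot hold
 * another buffer; they are folded into the current buffer (only when
 * there is no XDP headroom, otherwise the hole is simply skipped) rather
 * than being stranded at the end of the fragment.
 */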
b2baed69 RR |
2007 | /* |
2008 | * Returns false if we couldn't fill entirely (OOM). | |
2009 | * | |
2010 | * Normally run in the receive path, but can also be run from ndo_open | |
2011 | * before we're receiving packets, or from refill_work which is | |
2012 | * careful to disable receiving (using napi_disable). | |
2013 | */ | |
946fa564 MT |
2014 | static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, |
2015 | gfp_t gfp) | |
9ab86bbc SM |
2016 | { |
2017 | int err; | |
1788f495 | 2018 | bool oom; |
3f2c31d9 | 2019 | |
9ab86bbc SM |
2020 | do { |
2021 | if (vi->mergeable_rx_bufs) | |
2de2f7f4 | 2022 | err = add_recvbuf_mergeable(vi, rq, gfp); |
9ab86bbc | 2023 | else if (vi->big_packets) |
012873d0 | 2024 | err = add_recvbuf_big(vi, rq, gfp); |
9ab86bbc | 2025 | else |
946fa564 | 2026 | err = add_recvbuf_small(vi, rq, gfp); |
3f2c31d9 | 2027 | |
1788f495 | 2028 | oom = err == -ENOMEM; |
9ed4cb07 | 2029 | if (err) |
3f2c31d9 | 2030 | break; |
b7dfde95 | 2031 | } while (rq->vq->num_free); |
461f03dc | 2032 | if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { |
01c32598 MT |
2033 | unsigned long flags; |
2034 | ||
2035 | flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); | |
61217d8f | 2036 | u64_stats_inc(&rq->stats.kicks); |
01c32598 | 2037 | u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); |
461f03dc TM |
2038 | } |
2039 | ||
3161e453 | 2040 | return !oom; |
3f2c31d9 MM |
2041 | } |
2042 | ||
18445c4d | 2043 | static void skb_recv_done(struct virtqueue *rvq) |
296f96fc RR |
2044 | { |
2045 | struct virtnet_info *vi = rvq->vdev->priv; | |
986a4f4d | 2046 | struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; |
e9d7417b | 2047 | |
62087995 | 2048 | rq->calls++; |
e4e8452a | 2049 | virtqueue_napi_schedule(&rq->napi, rvq); |
296f96fc RR |
2050 | } |
2051 | ||
e4e8452a | 2052 | static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi) |
3e9d08ec | 2053 | { |
e4e8452a | 2054 | napi_enable(napi); |
3e9d08ec BR |
2055 | |
2056 | /* If all buffers were filled by the other side before napi was enabled, we | |
e4e8452a WB |
2057 | * won't get another interrupt, so process any outstanding packets now. |
2058 | * Call local_bh_enable after to trigger softIRQ processing. | |
2059 | */ | |
2060 | local_bh_disable(); | |
2061 | virtqueue_napi_schedule(napi, vq); | |
2062 | local_bh_enable(); | |
3e9d08ec BR |
2063 | } |
2064 | ||
b92f1e67 WB |
2065 | static void virtnet_napi_tx_enable(struct virtnet_info *vi, |
2066 | struct virtqueue *vq, | |
2067 | struct napi_struct *napi) | |
2068 | { | |
2069 | if (!napi->weight) | |
2070 | return; | |
2071 | ||
2072 | /* Tx napi touches cachelines on the cpu handling tx interrupts. Only | |
2073 | * enable the feature if this is likely affine with the transmit path. | |
2074 | */ | |
2075 | if (!vi->affinity_hint_set) { | |
2076 | napi->weight = 0; | |
2077 | return; | |
2078 | } | |
2079 | ||
2080 | return virtnet_napi_enable(vq, napi); | |
2081 | } | |
2082 | ||
78a57b48 WB |
2083 | static void virtnet_napi_tx_disable(struct napi_struct *napi) |
2084 | { | |
2085 | if (napi->weight) | |
2086 | napi_disable(napi); | |
2087 | } | |
2088 | ||
3161e453 RR |
2089 | static void refill_work(struct work_struct *work) |
2090 | { | |
e9d7417b JW |
2091 | struct virtnet_info *vi = |
2092 | container_of(work, struct virtnet_info, refill.work); | |
3161e453 | 2093 | bool still_empty; |
986a4f4d JW |
2094 | int i; |
2095 | ||
55257d72 | 2096 | for (i = 0; i < vi->curr_queue_pairs; i++) { |
986a4f4d | 2097 | struct receive_queue *rq = &vi->rq[i]; |
3161e453 | 2098 | |
986a4f4d | 2099 | napi_disable(&rq->napi); |
946fa564 | 2100 | still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); |
e4e8452a | 2101 | virtnet_napi_enable(rq->vq, &rq->napi); |
3161e453 | 2102 | |
986a4f4d JW |
2103 | /* In theory, this can happen: if we don't get any buffers in, | |
2104 | * we will *never* try to fill again. | |
2105 | */ | |
2106 | if (still_empty) | |
2107 | schedule_delayed_work(&vi->refill, HZ/2); | |
2108 | } | |
3161e453 RR |
2109 | } |
2110 | ||
2471c75e JDB |
2111 | static int virtnet_receive(struct receive_queue *rq, int budget, |
2112 | unsigned int *xdp_xmit) | |
296f96fc | 2113 | { |
e9d7417b | 2114 | struct virtnet_info *vi = rq->vq->vdev->priv; |
d46eeeaf | 2115 | struct virtnet_rq_stats stats = {}; |
a0929a44 | 2116 | unsigned int len; |
61217d8f | 2117 | int packets = 0; |
9ab86bbc | 2118 | void *buf; |
a0929a44 | 2119 | int i; |
296f96fc | 2120 | |
192f68cf | 2121 | if (!vi->big_packets || vi->mergeable_rx_bufs) { |
680557cf MT |
2122 | void *ctx; |
2123 | ||
61217d8f | 2124 | while (packets < budget && |
295525e2 | 2125 | (buf = virtnet_rq_get_buf(rq, &len, &ctx))) { |
a0929a44 | 2126 | receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats); |
61217d8f | 2127 | packets++; |
680557cf MT |
2128 | } |
2129 | } else { | |
61217d8f | 2130 | while (packets < budget && |
295525e2 | 2131 | (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) { |
a0929a44 | 2132 | receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats); |
61217d8f | 2133 | packets++; |
680557cf | 2134 | } |
296f96fc RR |
2135 | } |
2136 | ||
718be6ba | 2137 | if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { |
5a159128 JW |
2138 | if (!try_fill_recv(vi, rq, GFP_ATOMIC)) { |
2139 | spin_lock(&vi->refill_lock); | |
2140 | if (vi->refill_enabled) | |
2141 | schedule_delayed_work(&vi->refill, 0); | |
2142 | spin_unlock(&vi->refill_lock); | |
2143 | } | |
3161e453 | 2144 | } |
296f96fc | 2145 | |
61217d8f | 2146 | u64_stats_set(&stats.packets, packets); |
d7dfc5cf | 2147 | u64_stats_update_begin(&rq->stats.syncp); |
a0929a44 TM |
2148 | for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) { |
2149 | size_t offset = virtnet_rq_stats_desc[i].offset; | |
61217d8f | 2150 | u64_stats_t *item, *src; |
a0929a44 | 2151 | |
61217d8f ED |
2152 | item = (u64_stats_t *)((u8 *)&rq->stats + offset); |
2153 | src = (u64_stats_t *)((u8 *)&stats + offset); | |
2154 | u64_stats_add(item, u64_stats_read(src)); | |
a0929a44 | 2155 | } |
d7dfc5cf | 2156 | u64_stats_update_end(&rq->stats.syncp); |
61845d20 | 2157 | |
61217d8f | 2158 | return packets; |
2ffa7598 JW |
2159 | } |
2160 | ||
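/* A hedged sketch of the table driving the offset-based copy loop above.
 * The real virtnet_rq_stats_desc[] is defined earlier in this file; each
 * entry pairs an ethtool string with a field offset, roughly:
 *
 *   static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
 *           { "packets", offsetof(struct virtnet_rq_stats, packets) },
 *           { "bytes",   offsetof(struct virtnet_rq_stats, bytes) },
 *           ...
 *   };
 *
 * which lets one loop fold the on-stack per-poll counters into the
 * persistent per-queue counters without naming each field.
 */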
7b0411ef WB |
2161 | static void virtnet_poll_cleantx(struct receive_queue *rq) |
2162 | { | |
2163 | struct virtnet_info *vi = rq->vq->vdev->priv; | |
2164 | unsigned int index = vq2rxq(rq->vq); | |
2165 | struct send_queue *sq = &vi->sq[index]; | |
2166 | struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); | |
2167 | ||
534da5e8 | 2168 | if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) |
7b0411ef WB |
2169 | return; |
2170 | ||
2171 | if (__netif_tx_trylock(txq)) { | |
ebcce492 XZ |
2172 | if (sq->reset) { |
2173 | __netif_tx_unlock(txq); | |
2174 | return; | |
2175 | } | |
2176 | ||
a7766ef1 MT |
2177 | do { |
2178 | virtqueue_disable_cb(sq->vq); | |
5da7137d | 2179 | free_old_xmit(sq, true); |
a7766ef1 | 2180 | } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); |
22bc63c5 MT |
2181 | |
2182 | if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) | |
2183 | netif_tx_wake_queue(txq); | |
2184 | ||
7b0411ef WB |
2185 | __netif_tx_unlock(txq); |
2186 | } | |
7b0411ef WB |
2187 | } |
2188 | ||
62087995 HQ |
2189 | static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq) |
2190 | { | |
2191 | struct dim_sample cur_sample = {}; | |
2192 | ||
2193 | if (!rq->packets_in_napi) | |
2194 | return; | |
2195 | ||
2196 | u64_stats_update_begin(&rq->stats.syncp); | |
2197 | dim_update_sample(rq->calls, | |
2198 | u64_stats_read(&rq->stats.packets), | |
2199 | u64_stats_read(&rq->stats.bytes), | |
2200 | &cur_sample); | |
2201 | u64_stats_update_end(&rq->stats.syncp); | |
2202 | ||
2203 | net_dim(&rq->dim, cur_sample); | |
2204 | rq->packets_in_napi = 0; | |
2205 | } | |
2206 | ||
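/* For context: net_dim() compares the sample above with the previous one
 * and, when the dynamic interrupt moderation state machine decides to
 * switch profiles, schedules rq->dim.work to push the new coalescing
 * parameters to the device.
 */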
2ffa7598 JW |
2207 | static int virtnet_poll(struct napi_struct *napi, int budget) |
2208 | { | |
2209 | struct receive_queue *rq = | |
2210 | container_of(napi, struct receive_queue, napi); | |
9267c430 JW |
2211 | struct virtnet_info *vi = rq->vq->vdev->priv; |
2212 | struct send_queue *sq; | |
2a43565c | 2213 | unsigned int received; |
2471c75e | 2214 | unsigned int xdp_xmit = 0; |
62087995 | 2215 | bool napi_complete; |
2ffa7598 | 2216 | |
7b0411ef WB |
2217 | virtnet_poll_cleantx(rq); |
2218 | ||
186b3c99 | 2219 | received = virtnet_receive(rq, budget, &xdp_xmit); |
62087995 | 2220 | rq->packets_in_napi += received; |
2ffa7598 | 2221 | |
ad7e615f MK |
2222 | if (xdp_xmit & VIRTIO_XDP_REDIR) |
2223 | xdp_do_flush(); | |
2224 | ||
8329d98e | 2225 | /* Out of packets? */ |
62087995 HQ |
2226 | if (received < budget) { |
2227 | napi_complete = virtqueue_napi_complete(napi, rq->vq, received); | |
2228 | if (napi_complete && rq->dim_enabled) | |
2229 | virtnet_rx_dim_update(vi, rq); | |
2230 | } | |
296f96fc | 2231 | |
2471c75e | 2232 | if (xdp_xmit & VIRTIO_XDP_TX) { |
97c2c69e | 2233 | sq = virtnet_xdp_get_sq(vi); |
461f03dc TM |
2234 | if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { |
2235 | u64_stats_update_begin(&sq->stats.syncp); | |
61217d8f | 2236 | u64_stats_inc(&sq->stats.kicks); |
461f03dc TM |
2237 | u64_stats_update_end(&sq->stats.syncp); |
2238 | } | |
97c2c69e | 2239 | virtnet_xdp_put_sq(vi, sq); |
9267c430 | 2240 | } |
186b3c99 | 2241 | |
296f96fc RR |
2242 | return received; |
2243 | } | |
2244 | ||
5306623a FL |
2245 | static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index) |
2246 | { | |
2247 | virtnet_napi_tx_disable(&vi->sq[qp_index].napi); | |
2248 | napi_disable(&vi->rq[qp_index].napi); | |
2249 | xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); | |
2250 | } | |
2251 | ||
2252 | static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index) | |
2253 | { | |
2254 | struct net_device *dev = vi->dev; | |
2255 | int err; | |
2256 | ||
2257 | err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index, | |
2258 | vi->rq[qp_index].napi.napi_id); | |
2259 | if (err < 0) | |
2260 | return err; | |
2261 | ||
2262 | err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq, | |
2263 | MEM_TYPE_PAGE_SHARED, NULL); | |
2264 | if (err < 0) | |
2265 | goto err_xdp_reg_mem_model; | |
2266 | ||
2267 | virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi); | |
2268 | virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi); | |
2269 | ||
2270 | return 0; | |
2271 | ||
2272 | err_xdp_reg_mem_model: | |
2273 | xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); | |
2274 | return err; | |
2275 | } | |
2276 | ||
986a4f4d JW |
2277 | static int virtnet_open(struct net_device *dev) |
2278 | { | |
2279 | struct virtnet_info *vi = netdev_priv(dev); | |
754b8a21 | 2280 | int i, err; |
986a4f4d | 2281 | |
5a159128 JW |
2282 | enable_delayed_refill(vi); |
2283 | ||
e4166625 JW |
2284 | for (i = 0; i < vi->max_queue_pairs; i++) { |
2285 | if (i < vi->curr_queue_pairs) | |
2286 | /* Make sure we have some buffers: if oom use wq. */ | |
946fa564 | 2287 | if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) |
e4166625 | 2288 | schedule_delayed_work(&vi->refill, 0); |
754b8a21 | 2289 | |
5306623a | 2290 | err = virtnet_enable_queue_pair(vi, i); |
754b8a21 | 2291 | if (err < 0) |
5306623a | 2292 | goto err_enable_qp; |
986a4f4d JW |
2293 | } |
2294 | ||
2295 | return 0; | |
5306623a FL |
2296 | |
2297 | err_enable_qp: | |
2298 | disable_delayed_refill(vi); | |
2299 | cancel_delayed_work_sync(&vi->refill); | |
2300 | ||
62087995 | 2301 | for (i--; i >= 0; i--) { |
5306623a | 2302 | virtnet_disable_queue_pair(vi, i); |
62087995 HQ |
2303 | cancel_work_sync(&vi->rq[i].dim.work); |
2304 | } | |
2305 | ||
5306623a | 2306 | return err; |
986a4f4d JW |
2307 | } |
2308 | ||
b92f1e67 WB |
2309 | static int virtnet_poll_tx(struct napi_struct *napi, int budget) |
2310 | { | |
2311 | struct send_queue *sq = container_of(napi, struct send_queue, napi); | |
2312 | struct virtnet_info *vi = sq->vq->vdev->priv; | |
534da5e8 TM |
2313 | unsigned int index = vq2txq(sq->vq); |
2314 | struct netdev_queue *txq; | |
5a2f966d MT |
2315 | int opaque; |
2316 | bool done; | |
b92f1e67 | 2317 | |
534da5e8 TM |
2318 | if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { |
2319 | /* We don't need to enable cb for XDP */ | |
2320 | napi_complete_done(napi, 0); | |
2321 | return 0; | |
2322 | } | |
2323 | ||
2324 | txq = netdev_get_tx_queue(vi->dev, index); | |
b92f1e67 | 2325 | __netif_tx_lock(txq, raw_smp_processor_id()); |
5a2f966d | 2326 | virtqueue_disable_cb(sq->vq); |
5da7137d | 2327 | free_old_xmit(sq, true); |
5a2f966d | 2328 | |
22bc63c5 MT |
2329 | if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) |
2330 | netif_tx_wake_queue(txq); | |
2331 | ||
5a2f966d MT |
2332 | opaque = virtqueue_enable_cb_prepare(sq->vq); |
2333 | ||
2334 | done = napi_complete_done(napi, 0); | |
2335 | ||
2336 | if (!done) | |
2337 | virtqueue_disable_cb(sq->vq); | |
2338 | ||
b92f1e67 WB |
2339 | __netif_tx_unlock(txq); |
2340 | ||
5a2f966d MT |
2341 | if (done) { |
2342 | if (unlikely(virtqueue_poll(sq->vq, opaque))) { | |
2343 | if (napi_schedule_prep(napi)) { | |
2344 | __netif_tx_lock(txq, raw_smp_processor_id()); | |
2345 | virtqueue_disable_cb(sq->vq); | |
2346 | __netif_tx_unlock(txq); | |
2347 | __napi_schedule(napi); | |
2348 | } | |
2349 | } | |
2350 | } | |
b92f1e67 | 2351 | |
b92f1e67 WB |
2352 | return 0; |
2353 | } | |
2354 | ||
e9d7417b | 2355 | static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) |
296f96fc | 2356 | { |
012873d0 | 2357 | struct virtio_net_hdr_mrg_rxbuf *hdr; |
296f96fc | 2358 | const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; |
e9d7417b | 2359 | struct virtnet_info *vi = sq->vq->vdev->priv; |
e2fcad58 | 2360 | int num_sg; |
012873d0 | 2361 | unsigned hdr_len = vi->hdr_len; |
e7428e95 | 2362 | bool can_push; |
296f96fc | 2363 | |
e174961c | 2364 | pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); |
e7428e95 MT |
2365 | |
2366 | can_push = vi->any_header_sg && | |
2367 | !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && | |
2368 | !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; | |
2369 | /* Even if we can, don't push here yet as this would skew | |
2370 | * csum_start offset below. */ | |
2371 | if (can_push) | |
012873d0 | 2372 | hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); |
e7428e95 | 2373 | else |
dae64749 | 2374 | hdr = &skb_vnet_common_hdr(skb)->mrg_hdr; |
296f96fc | 2375 | |
e858fae2 | 2376 | if (virtio_net_hdr_from_skb(skb, &hdr->hdr, |
fd3a8862 WB |
2377 | virtio_is_little_endian(vi->vdev), false, |
2378 | 0)) | |
85eb1389 | 2379 | return -EPROTO; |
296f96fc | 2380 | |
3f2c31d9 | 2381 | if (vi->mergeable_rx_bufs) |
012873d0 | 2382 | hdr->num_buffers = 0; |
3f2c31d9 | 2383 | |
547c890c | 2384 | sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); |
e7428e95 MT |
2385 | if (can_push) { |
2386 | __skb_push(skb, hdr_len); | |
2387 | num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); | |
e2fcad58 JD |
2388 | if (unlikely(num_sg < 0)) |
2389 | return num_sg; | |
e7428e95 MT |
2390 | /* Pull header back to avoid skew in tx bytes calculations. */ |
2391 | __skb_pull(skb, hdr_len); | |
2392 | } else { | |
2393 | sg_set_buf(sq->sg, hdr, hdr_len); | |
e2fcad58 JD |
2394 | num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); |
2395 | if (unlikely(num_sg < 0)) | |
2396 | return num_sg; | |
2397 | num_sg++; | |
e7428e95 | 2398 | } |
9dc7b9e4 | 2399 | return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); |
11a3a154 RR |
2400 | } |
2401 | ||
424efe9c | 2402 | static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) |
99ffc696 RR |
2403 | { |
2404 | struct virtnet_info *vi = netdev_priv(dev); | |
986a4f4d JW |
2405 | int qnum = skb_get_queue_mapping(skb); |
2406 | struct send_queue *sq = &vi->sq[qnum]; | |
9ed4cb07 | 2407 | int err; |
4b7fd2e6 | 2408 | struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); |
6b16f9ee | 2409 | bool kick = !netdev_xmit_more(); |
b92f1e67 | 2410 | bool use_napi = sq->napi.weight; |
2cb9c6ba | 2411 | |
2cb9c6ba | 2412 | /* Free up any pending old buffers before queueing new ones. */ |
a7766ef1 MT |
2413 | do { |
2414 | if (use_napi) | |
2415 | virtqueue_disable_cb(sq->vq); | |
2416 | ||
5da7137d | 2417 | free_old_xmit(sq, false); |
99ffc696 | 2418 | |
a7766ef1 MT |
2419 | } while (use_napi && kick && |
2420 | unlikely(!virtqueue_enable_cb_delayed(sq->vq))); | |
bdb12e0d | 2421 | |
074c3582 JK |
2422 | /* timestamp packet in software */ |
2423 | skb_tx_timestamp(skb); | |
2424 | ||
03f191ba | 2425 | /* Try to transmit */ |
b7dfde95 | 2426 | err = xmit_skb(sq, skb); |
48925e37 | 2427 | |
9ed4cb07 | 2428 | /* This should not happen! */ |
681daee2 | 2429 | if (unlikely(err)) { |
d12a26b7 | 2430 | DEV_STATS_INC(dev, tx_fifo_errors); |
9ed4cb07 RR |
2431 | if (net_ratelimit()) |
2432 | dev_warn(&dev->dev, | |
7934b481 YS |
2433 | "Unexpected TXQ (%d) queue failure: %d\n", |
2434 | qnum, err); | |
d12a26b7 | 2435 | DEV_STATS_INC(dev, tx_dropped); |
85e94525 | 2436 | dev_kfree_skb_any(skb); |
58eba97d | 2437 | return NETDEV_TX_OK; |
296f96fc | 2438 | } |
03f191ba | 2439 | |
48925e37 | 2440 | /* Don't wait up for transmitted skbs to be freed. */ |
b92f1e67 WB |
2441 | if (!use_napi) { |
2442 | skb_orphan(skb); | |
895b5c9f | 2443 | nf_reset_ct(skb); |
b92f1e67 | 2444 | } |
48925e37 | 2445 | |
b8ef4809 | 2446 | check_sq_full_and_disable(vi, dev, sq); |
48925e37 | 2447 | |
461f03dc TM |
2448 | if (kick || netif_xmit_stopped(txq)) { |
2449 | if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { | |
2450 | u64_stats_update_begin(&sq->stats.syncp); | |
61217d8f | 2451 | u64_stats_inc(&sq->stats.kicks); |
461f03dc TM |
2452 | u64_stats_update_end(&sq->stats.syncp); |
2453 | } | |
2454 | } | |
296f96fc | 2455 | |
0b725a2c | 2456 | return NETDEV_TX_OK; |
c223a078 DM |
2457 | } |
2458 | ||
6a4763e2 XZ |
2459 | static int virtnet_rx_resize(struct virtnet_info *vi, |
2460 | struct receive_queue *rq, u32 ring_num) | |
2461 | { | |
2462 | bool running = netif_running(vi->dev); | |
2463 | int err, qindex; | |
2464 | ||
2465 | qindex = rq - vi->rq; | |
2466 | ||
62087995 | 2467 | if (running) { |
6a4763e2 | 2468 | napi_disable(&rq->napi); |
62087995 HQ |
2469 | cancel_work_sync(&rq->dim.work); |
2470 | } | |
6a4763e2 | 2471 | |
2311e06b | 2472 | err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf); |
6a4763e2 XZ |
2473 | if (err) |
2474 | netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); | |
2475 | ||
2476 | if (!try_fill_recv(vi, rq, GFP_KERNEL)) | |
2477 | schedule_delayed_work(&vi->refill, 0); | |
2478 | ||
2479 | if (running) | |
2480 | virtnet_napi_enable(rq->vq, &rq->napi); | |
2481 | return err; | |
2482 | } | |
2483 | ||
ebcce492 XZ |
2484 | static int virtnet_tx_resize(struct virtnet_info *vi, |
2485 | struct send_queue *sq, u32 ring_num) | |
2486 | { | |
2487 | bool running = netif_running(vi->dev); | |
2488 | struct netdev_queue *txq; | |
2489 | int err, qindex; | |
2490 | ||
2491 | qindex = sq - vi->sq; | |
2492 | ||
2493 | if (running) | |
2494 | virtnet_napi_tx_disable(&sq->napi); | |
2495 | ||
2496 | txq = netdev_get_tx_queue(vi->dev, qindex); | |
2497 | ||
2498 | /* 1. wait for all in-flight xmit to complete | |
2499 | * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue() | |
2500 | */ | |
2501 | __netif_tx_lock_bh(txq); | |
2502 | ||
2503 | /* Prevent rx poll from accessing sq. */ | |
2504 | sq->reset = true; | |
2505 | ||
2506 | /* Prevent the upper layer from trying to send packets. */ | |
2507 | netif_stop_subqueue(vi->dev, qindex); | |
2508 | ||
2509 | __netif_tx_unlock_bh(txq); | |
2510 | ||
2511 | err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf); | |
2512 | if (err) | |
2513 | netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); | |
2514 | ||
2515 | __netif_tx_lock_bh(txq); | |
2516 | sq->reset = false; | |
2517 | netif_tx_wake_queue(txq); | |
2518 | __netif_tx_unlock_bh(txq); | |
2519 | ||
2520 | if (running) | |
2521 | virtnet_napi_tx_enable(vi, sq->vq, &sq->napi); | |
2522 | return err; | |
2523 | } | |
2524 | ||
40cbfc37 AK |
2525 | /* |
2526 | * Send command via the control virtqueue and check status. Commands | |
2527 | * supported by the hypervisor, as indicated by feature bits, should | |
788a8b6d | 2528 | * never fail unless improperly formatted. |
40cbfc37 AK |
2529 | */ |
2530 | static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, | |
d24bae32 | 2531 | struct scatterlist *out) |
40cbfc37 | 2532 | { |
f7bc9594 | 2533 | struct scatterlist *sgs[4], hdr, stat; |
d24bae32 | 2534 | unsigned out_num = 0, tmp; |
222722bc | 2535 | int ret; |
40cbfc37 AK |
2536 | |
2537 | /* Caller should know better */ | |
f7bc9594 | 2538 | BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); |
40cbfc37 | 2539 | |
12e57169 MT |
2540 | vi->ctrl->status = ~0; |
2541 | vi->ctrl->hdr.class = class; | |
2542 | vi->ctrl->hdr.cmd = cmd; | |
f7bc9594 | 2543 | /* Add header */ |
12e57169 | 2544 | sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); |
f7bc9594 | 2545 | sgs[out_num++] = &hdr; |
40cbfc37 | 2546 | |
f7bc9594 RR |
2547 | if (out) |
2548 | sgs[out_num++] = out; | |
40cbfc37 | 2549 | |
f7bc9594 | 2550 | /* Add return status. */ |
12e57169 | 2551 | sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); |
d24bae32 | 2552 | sgs[out_num] = &stat; |
40cbfc37 | 2553 | |
d24bae32 | 2554 | BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); |
222722bc YW |
2555 | ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); |
2556 | if (ret < 0) { | |
2557 | dev_warn(&vi->vdev->dev, | |
2558 | "Failed to add sgs for command vq: %d\n.", ret); | |
2559 | return false; | |
2560 | } | |
40cbfc37 | 2561 | |
67975901 | 2562 | if (unlikely(!virtqueue_kick(vi->cvq))) |
12e57169 | 2563 | return vi->ctrl->status == VIRTIO_NET_OK; |
40cbfc37 AK |
2564 | |
2565 | /* Spin for a response; the kick causes an ioport write, trapping | |
2566 | * into the hypervisor, so the request should be handled immediately. | |
2567 | */ | |
047b9b94 | 2568 | while (!virtqueue_get_buf(vi->cvq, &tmp) && |
0d197a14 JW |
2569 | !virtqueue_is_broken(vi->cvq)) { |
2570 | cond_resched(); | |
40cbfc37 | 2571 | cpu_relax(); |
0d197a14 | 2572 | } |
40cbfc37 | 2573 | |
12e57169 | 2574 | return vi->ctrl->status == VIRTIO_NET_OK; |
40cbfc37 AK |
2575 | } |
2576 | ||
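/* For reference, the scatterlist layout built by virtnet_send_command()
 * above:
 *
 *   sgs[0]       -> struct virtio_net_ctrl_hdr { class, cmd }  (out)
 *   sgs[1]       -> optional command payload                   (out)
 *   sgs[out_num] -> one-byte status written by the device      (in)
 *
 * All of these live in vi->ctrl rather than on the stack, since the
 * device accesses them via DMA.
 */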
9c46f6d4 AW |
2577 | static int virtnet_set_mac_address(struct net_device *dev, void *p) |
2578 | { | |
2579 | struct virtnet_info *vi = netdev_priv(dev); | |
2580 | struct virtio_device *vdev = vi->vdev; | |
f2f2c8b4 | 2581 | int ret; |
e37e2ff3 | 2582 | struct sockaddr *addr; |
7e58d5ae | 2583 | struct scatterlist sg; |
9c46f6d4 | 2584 | |
ba5e4426 SS |
2585 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) |
2586 | return -EOPNOTSUPP; | |
2587 | ||
801822d1 | 2588 | addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); |
e37e2ff3 AL |
2589 | if (!addr) |
2590 | return -ENOMEM; | |
e37e2ff3 AL |
2591 | |
2592 | ret = eth_prepare_mac_addr_change(dev, addr); | |
f2f2c8b4 | 2593 | if (ret) |
e37e2ff3 | 2594 | goto out; |
9c46f6d4 | 2595 | |
7e58d5ae AK |
2596 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { |
2597 | sg_init_one(&sg, addr->sa_data, dev->addr_len); | |
2598 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, | |
d24bae32 | 2599 | VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { |
7e58d5ae AK |
2600 | dev_warn(&vdev->dev, |
2601 | "Failed to set mac address by vq command.\n"); | |
e37e2ff3 AL |
2602 | ret = -EINVAL; |
2603 | goto out; | |
7e58d5ae | 2604 | } |
7e93a02f MT |
2605 | } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && |
2606 | !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { | |
855e0c52 RR |
2607 | unsigned int i; |
2608 | ||
2609 | /* Naturally, this has an atomicity problem. */ | |
2610 | for (i = 0; i < dev->addr_len; i++) | |
2611 | virtio_cwrite8(vdev, | |
2612 | offsetof(struct virtio_net_config, mac) + | |
2613 | i, addr->sa_data[i]); | |
7e58d5ae AK |
2614 | } |
2615 | ||
2616 | eth_commit_mac_addr_change(dev, p); | |
e37e2ff3 | 2617 | ret = 0; |
9c46f6d4 | 2618 | |
e37e2ff3 AL |
2619 | out: |
2620 | kfree(addr); | |
2621 | return ret; | |
9c46f6d4 AW |
2622 | } |
2623 | ||
bc1f4470 | 2624 | static void virtnet_stats(struct net_device *dev, |
2625 | struct rtnl_link_stats64 *tot) | |
3fa2a1df | 2626 | { |
2627 | struct virtnet_info *vi = netdev_priv(dev); | |
3fa2a1df | 2628 | unsigned int start; |
d7dfc5cf | 2629 | int i; |
3fa2a1df | 2630 | |
d7dfc5cf | 2631 | for (i = 0; i < vi->max_queue_pairs; i++) { |
a520794b | 2632 | u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops; |
d7dfc5cf TM |
2633 | struct receive_queue *rq = &vi->rq[i]; |
2634 | struct send_queue *sq = &vi->sq[i]; | |
3fa2a1df | 2635 | |
2636 | do { | |
068c38ad | 2637 | start = u64_stats_fetch_begin(&sq->stats.syncp); |
61217d8f ED |
2638 | tpackets = u64_stats_read(&sq->stats.packets); |
2639 | tbytes = u64_stats_read(&sq->stats.bytes); | |
2640 | terrors = u64_stats_read(&sq->stats.tx_timeouts); | |
068c38ad | 2641 | } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); |
83a27052 ED |
2642 | |
2643 | do { | |
068c38ad | 2644 | start = u64_stats_fetch_begin(&rq->stats.syncp); |
61217d8f ED |
2645 | rpackets = u64_stats_read(&rq->stats.packets); |
2646 | rbytes = u64_stats_read(&rq->stats.bytes); | |
2647 | rdrops = u64_stats_read(&rq->stats.drops); | |
068c38ad | 2648 | } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); |
3fa2a1df | 2649 | |
2650 | tot->rx_packets += rpackets; | |
2651 | tot->tx_packets += tpackets; | |
2652 | tot->rx_bytes += rbytes; | |
2653 | tot->tx_bytes += tbytes; | |
2c4a2f7d | 2654 | tot->rx_dropped += rdrops; |
a520794b | 2655 | tot->tx_errors += terrors; |
3fa2a1df | 2656 | } |
2657 | ||
d12a26b7 ED |
2658 | tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped); |
2659 | tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors); | |
2660 | tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors); | |
2661 | tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors); | |
3fa2a1df | 2662 | } |
2663 | ||
586d17c5 JW |
2664 | static void virtnet_ack_link_announce(struct virtnet_info *vi) |
2665 | { | |
2666 | rtnl_lock(); | |
2667 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, | |
d24bae32 | 2668 | VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) |
586d17c5 JW |
2669 | dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); |
2670 | rtnl_unlock(); | |
2671 | } | |
2672 | ||
47315329 | 2673 | static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
986a4f4d JW |
2674 | { |
2675 | struct scatterlist sg; | |
986a4f4d JW |
2676 | struct net_device *dev = vi->dev; |
2677 | ||
2678 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) | |
2679 | return 0; | |
2680 | ||
12e57169 MT |
2681 | vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); |
2682 | sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); | |
986a4f4d JW |
2683 | |
2684 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, | |
d24bae32 | 2685 | VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { |
986a4f4d JW |
2686 | dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n", | |
2687 | queue_pairs); | |
2688 | return -EINVAL; | |
55257d72 | 2689 | } else { |
986a4f4d | 2690 | vi->curr_queue_pairs = queue_pairs; |
35ed159b JW |
2691 | /* virtnet_open() will refill when the device goes up. */ | |
2692 | if (dev->flags & IFF_UP) | |
2693 | schedule_delayed_work(&vi->refill, 0); | |
55257d72 | 2694 | } |
986a4f4d JW |
2695 | |
2696 | return 0; | |
2697 | } | |
2698 | ||
47315329 JF |
2699 | static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
2700 | { | |
2701 | int err; | |
2702 | ||
2703 | rtnl_lock(); | |
2704 | err = _virtnet_set_queues(vi, queue_pairs); | |
2705 | rtnl_unlock(); | |
2706 | return err; | |
2707 | } | |
2708 | ||
296f96fc RR |
2709 | static int virtnet_close(struct net_device *dev) |
2710 | { | |
2711 | struct virtnet_info *vi = netdev_priv(dev); | |
986a4f4d | 2712 | int i; |
296f96fc | 2713 | |
5a159128 JW |
2714 | /* Make sure NAPI doesn't schedule refill work */ |
2715 | disable_delayed_refill(vi); | |
b2baed69 RR |
2716 | /* Make sure refill_work doesn't re-enable napi! */ |
2717 | cancel_delayed_work_sync(&vi->refill); | |
986a4f4d | 2718 | |
62087995 | 2719 | for (i = 0; i < vi->max_queue_pairs; i++) { |
5306623a | 2720 | virtnet_disable_queue_pair(vi, i); |
62087995 HQ |
2721 | cancel_work_sync(&vi->rq[i].dim.work); |
2722 | } | |
296f96fc | 2723 | |
296f96fc RR |
2724 | return 0; |
2725 | } | |
2726 | ||
b9f74252 | 2727 | static void virtnet_rx_mode_work(struct work_struct *work) |
2af7698e | 2728 | { |
b9f74252 JW |
2729 | struct virtnet_info *vi = |
2730 | container_of(work, struct virtnet_info, rx_mode_work); | |
2731 | struct net_device *dev = vi->dev; | |
f565a7c2 | 2732 | struct scatterlist sg[2]; |
f565a7c2 | 2733 | struct virtio_net_ctrl_mac *mac_data; |
ccffad25 | 2734 | struct netdev_hw_addr *ha; |
32e7bfc4 | 2735 | int uc_count; |
4cd24eaf | 2736 | int mc_count; |
f565a7c2 AW |
2737 | void *buf; |
2738 | int i; | |
2af7698e | 2739 | |
788a8b6d | 2740 | /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ |
2af7698e AW |
2741 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) |
2742 | return; | |
2743 | ||
b9f74252 JW |
2744 | rtnl_lock(); |
2745 | ||
12e57169 MT |
2746 | vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); |
2747 | vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); | |
2af7698e | 2748 | |
12e57169 | 2749 | sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); |
2af7698e AW |
2750 | |
2751 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, | |
d24bae32 | 2752 | VIRTIO_NET_CTRL_RX_PROMISC, sg)) |
2af7698e | 2753 | dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", |
12e57169 | 2754 | vi->ctrl->promisc ? "en" : "dis"); |
2af7698e | 2755 | |
12e57169 | 2756 | sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); |
2af7698e AW |
2757 | |
2758 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, | |
d24bae32 | 2759 | VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) |
2af7698e | 2760 | dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", |
12e57169 | 2761 | vi->ctrl->allmulti ? "en" : "dis"); |
f565a7c2 | 2762 | |
b9f74252 JW |
2763 | netif_addr_lock_bh(dev); |
2764 | ||
32e7bfc4 | 2765 | uc_count = netdev_uc_count(dev); |
4cd24eaf | 2766 | mc_count = netdev_mc_count(dev); |
f565a7c2 | 2767 | /* MAC filter - use one buffer for both lists */ |
4cd24eaf JP |
2768 | buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + |
2769 | (2 * sizeof(mac_data->entries)), GFP_ATOMIC); | |
2770 | mac_data = buf; | |
b9f74252 JW |
2771 | if (!buf) { |
2772 | netif_addr_unlock_bh(dev); | |
2773 | rtnl_unlock(); | |
f565a7c2 | 2774 | return; |
b9f74252 | 2775 | } |
f565a7c2 | 2776 | |
23e258e1 AW |
2777 | sg_init_table(sg, 2); |
2778 | ||
f565a7c2 | 2779 | /* Store the unicast list and count in the front of the buffer */ |
fdd819b2 | 2780 | mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); |
ccffad25 | 2781 | i = 0; |
32e7bfc4 | 2782 | netdev_for_each_uc_addr(ha, dev) |
ccffad25 | 2783 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
f565a7c2 AW |
2784 | |
2785 | sg_set_buf(&sg[0], mac_data, | |
32e7bfc4 | 2786 | sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); |
f565a7c2 AW |
2787 | |
2788 | /* multicast list and count fill the end */ | |
32e7bfc4 | 2789 | mac_data = (void *)&mac_data->macs[uc_count][0]; |
f565a7c2 | 2790 | |
fdd819b2 | 2791 | mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); |
567ec874 | 2792 | i = 0; |
22bedad3 JP |
2793 | netdev_for_each_mc_addr(ha, dev) |
2794 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); | |
f565a7c2 | 2795 | |
b9f74252 JW |
2796 | netif_addr_unlock_bh(dev); |
2797 | ||
f565a7c2 | 2798 | sg_set_buf(&sg[1], mac_data, |
4cd24eaf | 2799 | sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); |
f565a7c2 AW |
2800 | |
2801 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, | |
d24bae32 | 2802 | VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) |
99e872ae | 2803 | dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); |
f565a7c2 | 2804 | |
b9f74252 JW |
2805 | rtnl_unlock(); |
2806 | ||
f565a7c2 | 2807 | kfree(buf); |
2af7698e AW |
2808 | } |
2809 | ||
b9f74252 JW |
2810 | static void virtnet_set_rx_mode(struct net_device *dev) |
2811 | { | |
2812 | struct virtnet_info *vi = netdev_priv(dev); | |
2813 | ||
2814 | if (vi->rx_mode_work_enabled) | |
2815 | schedule_work(&vi->rx_mode_work); | |
2816 | } | |
2817 | ||
80d5c368 PM |
2818 | static int virtnet_vlan_rx_add_vid(struct net_device *dev, |
2819 | __be16 proto, u16 vid) | |
0bde9569 AW |
2820 | { |
2821 | struct virtnet_info *vi = netdev_priv(dev); | |
2822 | struct scatterlist sg; | |
2823 | ||
d7fad4c8 | 2824 | vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); |
12e57169 | 2825 | sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); |
0bde9569 AW |
2826 | |
2827 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, | |
d24bae32 | 2828 | VIRTIO_NET_CTRL_VLAN_ADD, &sg)) |
0bde9569 | 2829 | dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); |
8e586137 | 2830 | return 0; |
0bde9569 AW |
2831 | } |
2832 | ||
80d5c368 PM |
2833 | static int virtnet_vlan_rx_kill_vid(struct net_device *dev, |
2834 | __be16 proto, u16 vid) | |
0bde9569 AW |
2835 | { |
2836 | struct virtnet_info *vi = netdev_priv(dev); | |
2837 | struct scatterlist sg; | |
2838 | ||
d7fad4c8 | 2839 | vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); |
12e57169 | 2840 | sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); |
0bde9569 AW |
2841 | |
2842 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, | |
d24bae32 | 2843 | VIRTIO_NET_CTRL_VLAN_DEL, &sg)) |
0bde9569 | 2844 | dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); |
8e586137 | 2845 | return 0; |
0bde9569 AW |
2846 | } |
2847 | ||
310974fa | 2848 | static void virtnet_clean_affinity(struct virtnet_info *vi) |
986a4f4d JW |
2849 | { |
2850 | int i; | |
2851 | ||
8898c21c WG |
2852 | if (vi->affinity_hint_set) { |
2853 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
19e226e8 CR |
2854 | virtqueue_set_affinity(vi->rq[i].vq, NULL); |
2855 | virtqueue_set_affinity(vi->sq[i].vq, NULL); | |
47be2479 WG |
2856 | } |
2857 | ||
8898c21c WG |
2858 | vi->affinity_hint_set = false; |
2859 | } | |
8898c21c | 2860 | } |
47be2479 | 2861 | |
8898c21c WG |
2862 | static void virtnet_set_affinity(struct virtnet_info *vi) |
2863 | { | |
2ca653d6 CR |
2864 | cpumask_var_t mask; |
2865 | int stragglers; | |
2866 | int group_size; | |
2867 | int i, j, cpu; | |
2868 | int num_cpu; | |
2869 | int stride; | |
2870 | ||
2871 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { | |
310974fa | 2872 | virtnet_clean_affinity(vi); |
8898c21c | 2873 | return; |
986a4f4d JW |
2874 | } |
2875 | ||
2ca653d6 CR |
2876 | num_cpu = num_online_cpus(); |
2877 | stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); | |
2878 | stragglers = num_cpu >= vi->curr_queue_pairs ? | |
2879 | num_cpu % vi->curr_queue_pairs : | |
2880 | 0; | |
9b51d9d8 | 2881 | cpu = cpumask_first(cpu_online_mask); |
4d99f660 | 2882 | |
2ca653d6 CR |
2883 | for (i = 0; i < vi->curr_queue_pairs; i++) { |
2884 | group_size = stride + (i < stragglers ? 1 : 0); | |
2885 | ||
2886 | for (j = 0; j < group_size; j++) { | |
2887 | cpumask_set_cpu(cpu, mask); | |
2888 | cpu = cpumask_next_wrap(cpu, cpu_online_mask, | |
2889 | nr_cpu_ids, false); | |
2890 | } | |
2891 | virtqueue_set_affinity(vi->rq[i].vq, mask); | |
2892 | virtqueue_set_affinity(vi->sq[i].vq, mask); | |
044ab86d | 2893 | __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS); |
2ca653d6 | 2894 | cpumask_clear(mask); |
986a4f4d JW |
2895 | } |
2896 | ||
8898c21c | 2897 | vi->affinity_hint_set = true; |
2ca653d6 | 2898 | free_cpumask_var(mask); |
986a4f4d JW |
2899 | } |
2900 | ||
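/* A worked example of the striding above. With 8 online CPUs and 3 queue
 * pairs, stride = 2 and stragglers = 2, giving CPU groups {0,1,2},
 * {3,4,5} and {6,7}; each group is used both as the virtqueue affinity
 * hint and as the XPS map for that queue pair.
 */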
8017c279 | 2901 | static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) |
8de4b2f3 | 2902 | { |
8017c279 SAS |
2903 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, |
2904 | node); | |
2905 | virtnet_set_affinity(vi); | |
2906 | return 0; | |
2907 | } | |
8de4b2f3 | 2908 | |
8017c279 SAS |
2909 | static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node) |
2910 | { | |
2911 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, | |
2912 | node_dead); | |
2913 | virtnet_set_affinity(vi); | |
2914 | return 0; | |
2915 | } | |
3ab098df | 2916 | |
8017c279 SAS |
2917 | static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) |
2918 | { | |
2919 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, | |
2920 | node); | |
2921 | ||
310974fa | 2922 | virtnet_clean_affinity(vi); |
8017c279 SAS |
2923 | return 0; |
2924 | } | |
2925 | ||
2926 | static enum cpuhp_state virtionet_online; | |
2927 | ||
2928 | static int virtnet_cpu_notif_add(struct virtnet_info *vi) | |
2929 | { | |
2930 | int ret; | |
2931 | ||
2932 | ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); | |
2933 | if (ret) | |
2934 | return ret; | |
2935 | ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, | |
2936 | &vi->node_dead); | |
2937 | if (!ret) | |
2938 | return ret; | |
2939 | cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); | |
2940 | return ret; | |
2941 | } | |
2942 | ||
2943 | static void virtnet_cpu_notif_remove(struct virtnet_info *vi) | |
2944 | { | |
2945 | cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); | |
2946 | cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, | |
2947 | &vi->node_dead); | |
986a4f4d JW |
2948 | } |
2949 | ||
1db43c08 HQ |
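/* Per-virtqueue interrupt coalescing is configured through the control
 * virtqueue.  The vqn argument is the device-side virtqueue index:
 * rx queue i maps to vq 2 * i and tx queue i to vq 2 * i + 1, which is
 * what the rxq2vq()/txq2vq() helpers used by the callers compute.
 */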
2950 | static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi, |
2951 | u16 vqn, u32 max_usecs, u32 max_packets) | |
2952 | { | |
2953 | struct scatterlist sgs; | |
2954 | ||
2955 | vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn); | |
2956 | vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs); | |
2957 | vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets); | |
2958 | sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq)); | |
2959 | ||
2960 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, | |
2961 | VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET, | |
2962 | &sgs)) | |
2963 | return -EINVAL; | |
2964 | ||
2965 | return 0; | |
2966 | } | |
2967 | ||
2968 | static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi, | |
2969 | u16 queue, u32 max_usecs, | |
2970 | u32 max_packets) | |
2971 | { | |
2972 | int err; | |
2973 | ||
2974 | err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue), | |
2975 | max_usecs, max_packets); | |
2976 | if (err) | |
2977 | return err; | |
2978 | ||
2979 | vi->rq[queue].intr_coal.max_usecs = max_usecs; | |
2980 | vi->rq[queue].intr_coal.max_packets = max_packets; | |
2981 | ||
2982 | return 0; | |
2983 | } | |
2984 | ||
2985 | static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi, | |
2986 | u16 queue, u32 max_usecs, | |
2987 | u32 max_packets) | |
2988 | { | |
2989 | int err; | |
2990 | ||
2991 | err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue), | |
2992 | max_usecs, max_packets); | |
2993 | if (err) | |
2994 | return err; | |
2995 | ||
2996 | vi->sq[queue].intr_coal.max_usecs = max_usecs; | |
2997 | vi->sq[queue].intr_coal.max_packets = max_packets; | |
2998 | ||
2999 | return 0; | |
3000 | } | |
3001 | ||
8f9f4668 | 3002 | static void virtnet_get_ringparam(struct net_device *dev, |
74624944 HC |
3003 | struct ethtool_ringparam *ring, |
3004 | struct kernel_ethtool_ringparam *kernel_ring, | |
3005 | struct netlink_ext_ack *extack) | |
8f9f4668 RJ |
3006 | { |
3007 | struct virtnet_info *vi = netdev_priv(dev); | |
3008 | ||
8597b5dd XZ |
3009 | ring->rx_max_pending = vi->rq[0].vq->num_max; |
3010 | ring->tx_max_pending = vi->sq[0].vq->num_max; | |
3011 | ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); | |
3012 | ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); | |
8f9f4668 RJ |
3013 | } |
3014 | ||
a335b33f XZ |
3015 | static int virtnet_set_ringparam(struct net_device *dev, |
3016 | struct ethtool_ringparam *ring, | |
3017 | struct kernel_ethtool_ringparam *kernel_ring, | |
3018 | struct netlink_ext_ack *extack) | |
3019 | { | |
3020 | struct virtnet_info *vi = netdev_priv(dev); | |
3021 | u32 rx_pending, tx_pending; | |
3022 | struct receive_queue *rq; | |
3023 | struct send_queue *sq; | |
3024 | int i, err; | |
3025 | ||
3026 | if (ring->rx_mini_pending || ring->rx_jumbo_pending) | |
3027 | return -EINVAL; | |
3028 | ||
3029 | rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); | |
3030 | tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); | |
3031 | ||
3032 | if (ring->rx_pending == rx_pending && | |
3033 | ring->tx_pending == tx_pending) | |
3034 | return 0; | |
3035 | ||
3036 | if (ring->rx_pending > vi->rq[0].vq->num_max) | |
3037 | return -EINVAL; | |
3038 | ||
3039 | if (ring->tx_pending > vi->sq[0].vq->num_max) | |
3040 | return -EINVAL; | |
3041 | ||
3042 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
3043 | rq = vi->rq + i; | |
3044 | sq = vi->sq + i; | |
3045 | ||
3046 | if (ring->tx_pending != tx_pending) { | |
3047 | err = virtnet_tx_resize(vi, sq, ring->tx_pending); | |
3048 | if (err) | |
3049 | return err; | |
f61fe5f0 HQ |
3050 | |
3051 | /* Upon disabling and re-enabling a transmit virtqueue, the device must | |
3052 | * set the coalescing parameters of the virtqueue to those configured | |
3053 | * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver | |
3054 | * did not set any TX coalescing parameters, to 0. | |
3055 | */ | |
1db43c08 HQ |
3056 | err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i, |
3057 | vi->intr_coal_tx.max_usecs, | |
3058 | vi->intr_coal_tx.max_packets); | |
f61fe5f0 HQ |
3059 | if (err) |
3060 | return err; | |
a335b33f XZ |
3061 | } |
3062 | ||
3063 | if (ring->rx_pending != rx_pending) { | |
3064 | err = virtnet_rx_resize(vi, rq, ring->rx_pending); | |
3065 | if (err) | |
3066 | return err; | |
f61fe5f0 HQ |
3067 | |
3068 | /* The reason is the same as for the transmit virtqueue reset */ | 
1db43c08 HQ |
3069 | err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i, |
3070 | vi->intr_coal_rx.max_usecs, | |
3071 | vi->intr_coal_rx.max_packets); | |
f61fe5f0 HQ |
3072 | if (err) |
3073 | return err; | |
a335b33f XZ |
3074 | } |
3075 | } | |
3076 | ||
3077 | return 0; | |
8f9f4668 RJ |
3078 | } |
3079 | ||
c7114b12 AM |
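/* The RSS/hash configuration is sent as four scatterlist entries
 * because the indirection table and the key are variable-length:
 * sgs[0] covers the fixed fields before the table, sgs[1] the table
 * itself, sgs[2] the fixed fields between table and key, and sgs[3]
 * the key.
 */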
3080 | static bool virtnet_commit_rss_command(struct virtnet_info *vi) |
3081 | { | |
3082 | struct net_device *dev = vi->dev; | |
3083 | struct scatterlist sgs[4]; | |
3084 | unsigned int sg_buf_size; | |
3085 | ||
3086 | /* prepare sgs */ | |
3087 | sg_init_table(sgs, 4); | |
3088 | ||
3089 | sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table); | |
3090 | sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size); | |
3091 | ||
3092 | sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1); | |
3093 | sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size); | |
3094 | ||
3095 | sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key) | |
3096 | - offsetof(struct virtio_net_ctrl_rss, max_tx_vq); | |
3097 | sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size); | |
3098 | ||
3099 | sg_buf_size = vi->rss_key_size; | |
3100 | sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size); | |
3101 | ||
3102 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, | |
91f41f01 AM |
3103 | vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG |
3104 | : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) { | |
c7114b12 AM |
3105 | dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n"); |
3106 | return false; | |
3107 | } | |
3108 | return true; | |
3109 | } | |
3110 | ||
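/* Example: ethtool_rxfh_indir_default(i, n) is simply i % n, so with a
 * 128-entry indirection table and 4 queue pairs the default table is
 * 0, 1, 2, 3, 0, 1, 2, 3, ... spreading flows evenly across queues.
 */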
3111 | static void virtnet_init_default_rss(struct virtnet_info *vi) | |
3112 | { | |
3113 | u32 indir_val = 0; | |
3114 | int i = 0; | |
3115 | ||
3116 | vi->ctrl->rss.hash_types = vi->rss_hash_types_supported; | |
c1170820 | 3117 | vi->rss_hash_types_saved = vi->rss_hash_types_supported; |
c7114b12 AM |
3118 | vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size |
3119 | ? vi->rss_indir_table_size - 1 : 0; | |
3120 | vi->ctrl->rss.unclassified_queue = 0; | |
3121 | ||
3122 | for (; i < vi->rss_indir_table_size; ++i) { | |
3123 | indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs); | |
3124 | vi->ctrl->rss.indirection_table[i] = indir_val; | |
3125 | } | |
3126 | ||
2c507ce9 | 3127 | vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0; |
c7114b12 AM |
3128 | vi->ctrl->rss.hash_key_length = vi->rss_key_size; |
3129 | ||
3130 | netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size); | |
3131 | } | |
3132 | ||
c1170820 AM |
3133 | static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info) |
3134 | { | |
3135 | info->data = 0; | |
3136 | switch (info->flow_type) { | |
3137 | case TCP_V4_FLOW: | |
3138 | if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) { | |
3139 | info->data = RXH_IP_SRC | RXH_IP_DST | | |
3140 | RXH_L4_B_0_1 | RXH_L4_B_2_3; | |
3141 | } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { | |
3142 | info->data = RXH_IP_SRC | RXH_IP_DST; | |
3143 | } | |
3144 | break; | |
3145 | case TCP_V6_FLOW: | |
3146 | if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) { | |
3147 | info->data = RXH_IP_SRC | RXH_IP_DST | | |
3148 | RXH_L4_B_0_1 | RXH_L4_B_2_3; | |
3149 | } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { | |
3150 | info->data = RXH_IP_SRC | RXH_IP_DST; | |
3151 | } | |
3152 | break; | |
3153 | case UDP_V4_FLOW: | |
3154 | if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) { | |
3155 | info->data = RXH_IP_SRC | RXH_IP_DST | | |
3156 | RXH_L4_B_0_1 | RXH_L4_B_2_3; | |
3157 | } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { | |
3158 | info->data = RXH_IP_SRC | RXH_IP_DST; | |
3159 | } | |
3160 | break; | |
3161 | case UDP_V6_FLOW: | |
3162 | if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) { | |
3163 | info->data = RXH_IP_SRC | RXH_IP_DST | | |
3164 | RXH_L4_B_0_1 | RXH_L4_B_2_3; | |
3165 | } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { | |
3166 | info->data = RXH_IP_SRC | RXH_IP_DST; | |
3167 | } | |
3168 | break; | |
3169 | case IPV4_FLOW: | |
3170 | if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) | |
3171 | info->data = RXH_IP_SRC | RXH_IP_DST; | |
3172 | ||
3173 | break; | |
3174 | case IPV6_FLOW: | |
3175 | if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) | |
3176 | info->data = RXH_IP_SRC | RXH_IP_DST; | |
3177 | ||
3178 | break; | |
3179 | default: | |
3180 | info->data = 0; | |
3181 | break; | |
3182 | } | |
3183 | } | |
3184 | ||
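/* In ethtool's rx-flow-hash shorthand the options accepted below are:
 * 'sd'   = RXH_IP_SRC | RXH_IP_DST (hash on IP addresses only),
 * 'sdfn' = the same plus the L4 port bytes (RXH_L4_B_0_1 | RXH_L4_B_2_3),
 * 'r'    = RXH_DISCARD, which this driver treats as disabling hashing
 *          for the flow type.
 */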
3185 | static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info) | |
3186 | { | |
3187 | u32 new_hashtypes = vi->rss_hash_types_saved; | |
3188 | bool is_disable = info->data & RXH_DISCARD; | |
3189 | bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3); | |
3190 | ||
3191 | /* supports only 'sd', 'sdfn' and 'r' */ | |
3192 | if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) || is_l4 || is_disable)) | 
3193 | return false; | |
3194 | ||
3195 | switch (info->flow_type) { | |
3196 | case TCP_V4_FLOW: | |
3197 | new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4); | |
3198 | if (!is_disable) | |
3199 | new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4 | |
3200 | | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0); | |
3201 | break; | |
3202 | case UDP_V4_FLOW: | |
3203 | new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4); | |
3204 | if (!is_disable) | |
3205 | new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4 | |
3206 | | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0); | |
3207 | break; | |
3208 | case IPV4_FLOW: | |
3209 | new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4; | |
3210 | if (!is_disable) | |
3211 | new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4; | |
3212 | break; | |
3213 | case TCP_V6_FLOW: | |
3214 | new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6); | |
3215 | if (!is_disable) | |
3216 | new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6 | |
3217 | | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0); | |
3218 | break; | |
3219 | case UDP_V6_FLOW: | |
3220 | new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6); | |
3221 | if (!is_disable) | |
3222 | new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6 | |
3223 | | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0); | |
3224 | break; | |
3225 | case IPV6_FLOW: | |
3226 | new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6; | |
3227 | if (!is_disable) | |
3228 | new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6; | |
3229 | break; | |
3230 | default: | |
3231 | /* unsupported flow */ | |
3232 | return false; | |
3233 | } | |
3234 | ||
3235 | /* if unsupported hashtype was set */ | |
3236 | if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported)) | |
3237 | return false; | |
3238 | ||
3239 | if (new_hashtypes != vi->rss_hash_types_saved) { | |
3240 | vi->rss_hash_types_saved = new_hashtypes; | |
3241 | vi->ctrl->rss.hash_types = vi->rss_hash_types_saved; | |
3242 | if (vi->dev->features & NETIF_F_RXHASH) | |
3243 | return virtnet_commit_rss_command(vi); | |
3244 | } | |
3245 | ||
3246 | return true; | |
3247 | } | |
66846048 RJ |
3248 | |
3249 | static void virtnet_get_drvinfo(struct net_device *dev, | |
3250 | struct ethtool_drvinfo *info) | |
3251 | { | |
3252 | struct virtnet_info *vi = netdev_priv(dev); | |
3253 | struct virtio_device *vdev = vi->vdev; | |
3254 | ||
fb3ceec1 WS |
3255 | strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); |
3256 | strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); | |
3257 | strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); | |
66846048 RJ |
3258 | |
3259 | } | |
3260 | ||
d73bcd2c JW |
3261 | /* TODO: Eliminate out-of-order (OOO) packets during switching */ | 
3262 | static int virtnet_set_channels(struct net_device *dev, | |
3263 | struct ethtool_channels *channels) | |
3264 | { | |
3265 | struct virtnet_info *vi = netdev_priv(dev); | |
3266 | u16 queue_pairs = channels->combined_count; | |
3267 | int err; | |
3268 | ||
3269 | /* We don't support separate rx/tx channels. | |
3270 | * We don't allow setting 'other' channels. | |
3271 | */ | |
3272 | if (channels->rx_count || channels->tx_count || channels->other_count) | |
3273 | return -EINVAL; | |
3274 | ||
c18e9cd6 | 3275 | if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) |
d73bcd2c JW |
3276 | return -EINVAL; |
3277 | ||
f600b690 JF |
3278 | /* For now we don't support modifying channels while XDP is loaded. | 
3279 | * Also, when XDP is loaded all RX queues have XDP programs, so we only | 
3280 | * need to check a single RX queue. | 
3281 | */ | |
3282 | if (vi->rq[0].xdp_prog) | |
3283 | return -EINVAL; | |
3284 | ||
a0d1d0f4 | 3285 | cpus_read_lock(); |
47315329 | 3286 | err = _virtnet_set_queues(vi, queue_pairs); |
de33212f | 3287 | if (err) { |
a0d1d0f4 | 3288 | cpus_read_unlock(); |
de33212f | 3289 | goto err; |
d73bcd2c | 3290 | } |
de33212f | 3291 | virtnet_set_affinity(vi); |
a0d1d0f4 | 3292 | cpus_read_unlock(); |
d73bcd2c | 3293 | |
de33212f JD |
3294 | netif_set_real_num_tx_queues(dev, queue_pairs); |
3295 | netif_set_real_num_rx_queues(dev, queue_pairs); | |
3296 | err: | |
d73bcd2c JW |
3297 | return err; |
3298 | } | |
3299 | ||
d7dfc5cf TM |
3300 | static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data) |
3301 | { | |
3302 | struct virtnet_info *vi = netdev_priv(dev); | |
d7dfc5cf | 3303 | unsigned int i, j; |
d7a9a01b | 3304 | u8 *p = data; |
d7dfc5cf TM |
3305 | |
3306 | switch (stringset) { | |
3307 | case ETH_SS_STATS: | |
3308 | for (i = 0; i < vi->curr_queue_pairs; i++) { | |
d7a9a01b AD |
3309 | for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) |
3310 | ethtool_sprintf(&p, "rx_queue_%u_%s", i, | |
3311 | virtnet_rq_stats_desc[j].desc); | |
d7dfc5cf TM |
3312 | } |
3313 | ||
3314 | for (i = 0; i < vi->curr_queue_pairs; i++) { | |
d7a9a01b AD |
3315 | for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) |
3316 | ethtool_sprintf(&p, "tx_queue_%u_%s", i, | |
3317 | virtnet_sq_stats_desc[j].desc); | |
d7dfc5cf TM |
3318 | } |
3319 | break; | |
3320 | } | |
3321 | } | |
3322 | ||
3323 | static int virtnet_get_sset_count(struct net_device *dev, int sset) | |
3324 | { | |
3325 | struct virtnet_info *vi = netdev_priv(dev); | |
3326 | ||
3327 | switch (sset) { | |
3328 | case ETH_SS_STATS: | |
3329 | return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN + | |
3330 | VIRTNET_SQ_STATS_LEN); | |
3331 | default: | |
3332 | return -EOPNOTSUPP; | |
3333 | } | |
3334 | } | |
3335 | ||
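/* The u64_stats begin/retry pairs below make the 64-bit counter reads
 * tear-free on 32-bit machines: if a writer bumped the syncp sequence
 * count while the counters were being copied, the loop re-reads them.
 * On 64-bit builds the sequence counter compiles away.
 */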
3336 | static void virtnet_get_ethtool_stats(struct net_device *dev, | |
3337 | struct ethtool_stats *stats, u64 *data) | |
3338 | { | |
3339 | struct virtnet_info *vi = netdev_priv(dev); | |
3340 | unsigned int idx = 0, start, i, j; | |
3341 | const u8 *stats_base; | |
61217d8f | 3342 | const u64_stats_t *p; |
d7dfc5cf TM |
3343 | size_t offset; |
3344 | ||
3345 | for (i = 0; i < vi->curr_queue_pairs; i++) { | |
3346 | struct receive_queue *rq = &vi->rq[i]; | |
3347 | ||
61217d8f | 3348 | stats_base = (const u8 *)&rq->stats; |
d7dfc5cf | 3349 | do { |
068c38ad | 3350 | start = u64_stats_fetch_begin(&rq->stats.syncp); |
d7dfc5cf TM |
3351 | for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { |
3352 | offset = virtnet_rq_stats_desc[j].offset; | |
61217d8f ED |
3353 | p = (const u64_stats_t *)(stats_base + offset); |
3354 | data[idx + j] = u64_stats_read(p); | |
d7dfc5cf | 3355 | } |
068c38ad | 3356 | } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); |
d7dfc5cf TM |
3357 | idx += VIRTNET_RQ_STATS_LEN; |
3358 | } | |
3359 | ||
3360 | for (i = 0; i < vi->curr_queue_pairs; i++) { | |
3361 | struct send_queue *sq = &vi->sq[i]; | |
3362 | ||
61217d8f | 3363 | stats_base = (const u8 *)&sq->stats; |
d7dfc5cf | 3364 | do { |
068c38ad | 3365 | start = u64_stats_fetch_begin(&sq->stats.syncp); |
d7dfc5cf TM |
3366 | for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { |
3367 | offset = virtnet_sq_stats_desc[j].offset; | |
61217d8f ED |
3368 | p = (const u64_stats_t *)(stats_base + offset); |
3369 | data[idx + j] = u64_stats_read(p); | |
d7dfc5cf | 3370 | } |
068c38ad | 3371 | } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); |
d7dfc5cf TM |
3372 | idx += VIRTNET_SQ_STATS_LEN; |
3373 | } | |
3374 | } | |
3375 | ||
d73bcd2c JW |
3376 | static void virtnet_get_channels(struct net_device *dev, |
3377 | struct ethtool_channels *channels) | |
3378 | { | |
3379 | struct virtnet_info *vi = netdev_priv(dev); | |
3380 | ||
3381 | channels->combined_count = vi->curr_queue_pairs; | |
3382 | channels->max_combined = vi->max_queue_pairs; | |
3383 | channels->max_other = 0; | |
3384 | channels->rx_count = 0; | |
3385 | channels->tx_count = 0; | |
3386 | channels->other_count = 0; | |
3387 | } | |
3388 | ||
ebb6b4b1 PR |
3389 | static int virtnet_set_link_ksettings(struct net_device *dev, |
3390 | const struct ethtool_link_ksettings *cmd) | |
16032be5 NA |
3391 | { |
3392 | struct virtnet_info *vi = netdev_priv(dev); | |
16032be5 | 3393 | |
9aedc6e2 CF |
3394 | return ethtool_virtdev_set_link_ksettings(dev, cmd, |
3395 | &vi->speed, &vi->duplex); | |
16032be5 NA |
3396 | } |
3397 | ||
ebb6b4b1 PR |
3398 | static int virtnet_get_link_ksettings(struct net_device *dev, |
3399 | struct ethtool_link_ksettings *cmd) | |
16032be5 NA |
3400 | { |
3401 | struct virtnet_info *vi = netdev_priv(dev); | |
3402 | ||
ebb6b4b1 PR |
3403 | cmd->base.speed = vi->speed; |
3404 | cmd->base.duplex = vi->duplex; | |
3405 | cmd->base.port = PORT_OTHER; | |
16032be5 NA |
3406 | |
3407 | return 0; | |
3408 | } | |
3409 | ||
d7180080 HQ |
3410 | static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi, |
3411 | struct ethtool_coalesce *ec) | |
699b045a | 3412 | { |
d7180080 | 3413 | struct scatterlist sgs_tx; |
e9420838 | 3414 | int i; |
699b045a | 3415 | |
accc1bf2 BC |
3416 | vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); |
3417 | vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); | |
3418 | sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx)); | |
699b045a AK |
3419 | |
3420 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, | |
3421 | VIRTIO_NET_CTRL_NOTF_COAL_TX_SET, | |
3422 | &sgs_tx)) | |
3423 | return -EINVAL; | |
3424 | ||
308d7982 GL |
3425 | vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs; |
3426 | vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames; | |
e9420838 HQ |
3427 | for (i = 0; i < vi->max_queue_pairs; i++) { |
3428 | vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs; | |
3429 | vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames; | |
3430 | } | |
699b045a | 3431 | |
d7180080 HQ |
3432 | return 0; |
3433 | } | |
3434 | ||
3435 | static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, | |
3436 | struct ethtool_coalesce *ec) | |
3437 | { | |
62087995 | 3438 | bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; |
d7180080 HQ |
3439 | struct scatterlist sgs_rx; |
3440 | int i; | |
3441 | ||
62087995 HQ |
3442 | if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) |
3443 | return -EOPNOTSUPP; | |
3444 | ||
3445 | if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs || | |
3446 | ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets)) | |
3447 | return -EINVAL; | |
3448 | ||
3449 | if (rx_ctrl_dim_on && !vi->rx_dim_enabled) { | |
3450 | vi->rx_dim_enabled = true; | |
3451 | for (i = 0; i < vi->max_queue_pairs; i++) | |
3452 | vi->rq[i].dim_enabled = true; | |
3453 | return 0; | |
3454 | } | |
3455 | ||
3456 | if (!rx_ctrl_dim_on && vi->rx_dim_enabled) { | |
3457 | vi->rx_dim_enabled = false; | |
3458 | for (i = 0; i < vi->max_queue_pairs; i++) | |
3459 | vi->rq[i].dim_enabled = false; | |
3460 | } | |
3461 | ||
3462 | /* Since the per-queue coalescing params can be set, | 
3463 | * we need to apply the new global params even if they | 
3464 | * are not updated. | 
3465 | */ | |
accc1bf2 BC |
3466 | vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); |
3467 | vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); | |
3468 | sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx)); | |
699b045a AK |
3469 | |
3470 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, | |
3471 | VIRTIO_NET_CTRL_NOTF_COAL_RX_SET, | |
3472 | &sgs_rx)) | |
3473 | return -EINVAL; | |
3474 | ||
308d7982 GL |
3475 | vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; |
3476 | vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; | |
e9420838 HQ |
3477 | for (i = 0; i < vi->max_queue_pairs; i++) { |
3478 | vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs; | |
3479 | vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames; | |
3480 | } | |
699b045a AK |
3481 | |
3482 | return 0; | |
3483 | } | |
3484 | ||
d7180080 HQ |
3485 | static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, |
3486 | struct ethtool_coalesce *ec) | |
3487 | { | |
3488 | int err; | |
3489 | ||
3490 | err = virtnet_send_tx_notf_coal_cmds(vi, ec); | |
3491 | if (err) | |
3492 | return err; | |
3493 | ||
3494 | err = virtnet_send_rx_notf_coal_cmds(vi, ec); | |
3495 | if (err) | |
3496 | return err; | |
3497 | ||
3498 | return 0; | |
3499 | } | |
3500 | ||
62087995 HQ |
3501 | static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi, |
3502 | struct ethtool_coalesce *ec, | |
3503 | u16 queue) | |
394bd877 | 3504 | { |
62087995 HQ |
3505 | bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; |
3506 | bool cur_rx_dim = vi->rq[queue].dim_enabled; | |
3507 | u32 max_usecs, max_packets; | |
394bd877 GL |
3508 | int err; |
3509 | ||
62087995 HQ |
3510 | max_usecs = vi->rq[queue].intr_coal.max_usecs; |
3511 | max_packets = vi->rq[queue].intr_coal.max_packets; | |
3512 | ||
3513 | if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs || | |
3514 | ec->rx_max_coalesced_frames != max_packets)) | |
3515 | return -EINVAL; | |
3516 | ||
3517 | if (rx_ctrl_dim_on && !cur_rx_dim) { | |
3518 | vi->rq[queue].dim_enabled = true; | |
3519 | return 0; | |
3520 | } | |
3521 | ||
3522 | if (!rx_ctrl_dim_on && cur_rx_dim) | |
3523 | vi->rq[queue].dim_enabled = false; | |
3524 | ||
3525 | /* If no params are updated, userspace ethtool will | |
3526 | * reject the modification. | |
3527 | */ | |
1db43c08 HQ |
3528 | err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue, |
3529 | ec->rx_coalesce_usecs, | |
3530 | ec->rx_max_coalesced_frames); | |
bfb2b360 HQ |
3531 | if (err) |
3532 | return err; | |
394bd877 | 3533 | |
62087995 HQ |
3534 | return 0; |
3535 | } | |
3536 | ||
3537 | static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi, | |
3538 | struct ethtool_coalesce *ec, | |
3539 | u16 queue) | |
3540 | { | |
3541 | int err; | |
3542 | ||
3543 | err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue); | |
3544 | if (err) | |
3545 | return err; | |
3546 | ||
1db43c08 HQ |
3547 | err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue, |
3548 | ec->tx_coalesce_usecs, | |
3549 | ec->tx_max_coalesced_frames); | |
bfb2b360 HQ |
3550 | if (err) |
3551 | return err; | |
3552 | ||
394bd877 GL |
3553 | return 0; |
3554 | } | |
3555 | ||
62087995 HQ |
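/* Dynamic interrupt moderation (DIM): net_dim() picks a moderation
 * profile index from the observed traffic and schedules this work;
 * here the chosen usecs/packets pair is pushed to the device with the
 * per-virtqueue coalescing command for every rxq that has DIM enabled.
 */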
3556 | static void virtnet_rx_dim_work(struct work_struct *work) |
3557 | { | |
3558 | struct dim *dim = container_of(work, struct dim, work); | |
3559 | struct receive_queue *rq = container_of(dim, | |
3560 | struct receive_queue, dim); | |
3561 | struct virtnet_info *vi = rq->vq->vdev->priv; | |
3562 | struct net_device *dev = vi->dev; | |
3563 | struct dim_cq_moder update_moder; | |
3564 | int i, qnum, err; | |
3565 | ||
3566 | if (!rtnl_trylock()) | |
3567 | return; | |
3568 | ||
3569 | /* Each rxq's work is queued by "net_dim()->schedule_work()" | |
3570 | * in response to NAPI traffic changes. Note that dim->profile_ix | |
3571 | * for each rxq is updated prior to the queuing action. | |
3572 | * So we only need to traverse and update the profiles of all rxqs | 
3573 | * in this work handler, which holds the rtnl_lock. | 
3574 | */ | |
3575 | for (i = 0; i < vi->curr_queue_pairs; i++) { | |
3576 | rq = &vi->rq[i]; | |
3577 | dim = &rq->dim; | |
3578 | qnum = rq - vi->rq; | |
3579 | ||
3580 | if (!rq->dim_enabled) | |
3581 | continue; | |
3582 | ||
3583 | update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); | |
3584 | if (update_moder.usec != rq->intr_coal.max_usecs || | |
3585 | update_moder.pkts != rq->intr_coal.max_packets) { | |
3586 | err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum, | |
3587 | update_moder.usec, | |
3588 | update_moder.pkts); | |
3589 | if (err) | |
3590 | pr_debug("%s: Failed to send dim parameters on rxq%d\n", | |
3591 | dev->name, qnum); | |
3592 | dim->state = DIM_START_MEASURE; | |
3593 | } | |
3594 | } | |
3595 | ||
3596 | rtnl_unlock(); | |
3597 | } | |
3598 | ||
699b045a AK |
3599 | static int virtnet_coal_params_supported(struct ethtool_coalesce *ec) |
3600 | { | |
3601 | /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL | |
c4e33cf2 | 3602 | * or VIRTIO_NET_F_VQ_NOTF_COAL feature is negotiated. |
699b045a AK |
3603 | */ |
3604 | if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs) | |
3605 | return -EOPNOTSUPP; | |
3606 | ||
3607 | if (ec->tx_max_coalesced_frames > 1 || | |
3608 | ec->rx_max_coalesced_frames != 1) | |
3609 | return -EINVAL; | |
3610 | ||
3611 | return 0; | |
3612 | } | |
3613 | ||
394bd877 GL |
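/* weight ^ vq_weight is non-zero iff the requested NAPI weight differs
 * from the queue's current one; such a change is only permitted while
 * the interface is down, hence -EBUSY when IFF_UP is set.
 */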
3614 | static int virtnet_should_update_vq_weight(int dev_flags, int weight, |
3615 | int vq_weight, bool *should_update) | |
3616 | { | |
3617 | if (weight ^ vq_weight) { | |
3618 | if (dev_flags & IFF_UP) | |
3619 | return -EBUSY; | |
3620 | *should_update = true; | |
3621 | } | |
3622 | ||
3623 | return 0; | |
3624 | } | |
3625 | ||
0c465be1 | 3626 | static int virtnet_set_coalesce(struct net_device *dev, |
f3ccfda1 YM |
3627 | struct ethtool_coalesce *ec, |
3628 | struct kernel_ethtool_coalesce *kernel_coal, | |
3629 | struct netlink_ext_ack *extack) | |
0c465be1 | 3630 | { |
0c465be1 | 3631 | struct virtnet_info *vi = netdev_priv(dev); |
394bd877 | 3632 | int ret, queue_number, napi_weight; |
699b045a | 3633 | bool update_napi = false; |
0c465be1 | 3634 | |
699b045a | 3635 | /* Can't change NAPI weight if the link is up */ |
0c465be1 | 3636 | napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; |
394bd877 GL |
3637 | for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) { |
3638 | ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, | |
3639 | vi->sq[queue_number].napi.weight, | |
3640 | &update_napi); | |
3641 | if (ret) | |
3642 | return ret; | |
3643 | ||
3644 | if (update_napi) { | |
3645 | /* All queues in the range [queue_number, vi->max_queue_pairs) will be | 
3646 | * updated for the sake of simplicity, which might not be necessary | 
3647 | */ | |
3648 | break; | |
3649 | } | |
699b045a AK |
3650 | } |
3651 | ||
3652 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) | |
3653 | ret = virtnet_send_notf_coal_cmds(vi, ec); | |
3654 | else | |
3655 | ret = virtnet_coal_params_supported(ec); | |
3656 | ||
3657 | if (ret) | |
3658 | return ret; | |
3659 | ||
3660 | if (update_napi) { | |
394bd877 GL |
3661 | for (; queue_number < vi->max_queue_pairs; queue_number++) |
3662 | vi->sq[queue_number].napi.weight = napi_weight; | |
0c465be1 JW |
3663 | } |
3664 | ||
699b045a | 3665 | return ret; |
0c465be1 JW |
3666 | } |
3667 | ||
3668 | static int virtnet_get_coalesce(struct net_device *dev, | |
f3ccfda1 YM |
3669 | struct ethtool_coalesce *ec, |
3670 | struct kernel_ethtool_coalesce *kernel_coal, | |
3671 | struct netlink_ext_ack *extack) | |
0c465be1 | 3672 | { |
0c465be1 JW |
3673 | struct virtnet_info *vi = netdev_priv(dev); |
3674 | ||
699b045a | 3675 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { |
308d7982 GL |
3676 | ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs; |
3677 | ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs; | |
3678 | ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets; | |
3679 | ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets; | |
62087995 | 3680 | ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled; |
699b045a AK |
3681 | } else { |
3682 | ec->rx_max_coalesced_frames = 1; | |
0c465be1 | 3683 | |
699b045a AK |
3684 | if (vi->sq[0].napi.weight) |
3685 | ec->tx_max_coalesced_frames = 1; | |
3686 | } | |
0c465be1 JW |
3687 | |
3688 | return 0; | |
3689 | } | |
3690 | ||
394bd877 GL |
3691 | static int virtnet_set_per_queue_coalesce(struct net_device *dev, |
3692 | u32 queue, | |
3693 | struct ethtool_coalesce *ec) | |
3694 | { | |
3695 | struct virtnet_info *vi = netdev_priv(dev); | |
3696 | int ret, napi_weight; | |
3697 | bool update_napi = false; | |
3698 | ||
3699 | if (queue >= vi->max_queue_pairs) | |
3700 | return -EINVAL; | |
3701 | ||
3702 | /* Can't change NAPI weight if the link is up */ | |
3703 | napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; | |
3704 | ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, | |
3705 | vi->sq[queue].napi.weight, | |
3706 | &update_napi); | |
3707 | if (ret) | |
3708 | return ret; | |
3709 | ||
3710 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) | |
3711 | ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue); | |
3712 | else | |
3713 | ret = virtnet_coal_params_supported(ec); | |
3714 | ||
3715 | if (ret) | |
3716 | return ret; | |
3717 | ||
3718 | if (update_napi) | |
3719 | vi->sq[queue].napi.weight = napi_weight; | |
3720 | ||
3721 | return 0; | |
3722 | } | |
3723 | ||
3724 | static int virtnet_get_per_queue_coalesce(struct net_device *dev, | |
3725 | u32 queue, | |
3726 | struct ethtool_coalesce *ec) | |
3727 | { | |
3728 | struct virtnet_info *vi = netdev_priv(dev); | |
3729 | ||
3730 | if (queue >= vi->max_queue_pairs) | |
3731 | return -EINVAL; | |
3732 | ||
3733 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { | |
3734 | ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs; | |
3735 | ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs; | |
3736 | ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets; | |
3737 | ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets; | |
62087995 | 3738 | ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled; |
394bd877 GL |
3739 | } else { |
3740 | ec->rx_max_coalesced_frames = 1; | |
3741 | ||
134674c1 | 3742 | if (vi->sq[queue].napi.weight) |
394bd877 GL |
3743 | ec->tx_max_coalesced_frames = 1; |
3744 | } | |
3745 | ||
3746 | return 0; | |
3747 | } | |
3748 | ||
16032be5 NA |
3749 | static void virtnet_init_settings(struct net_device *dev) |
3750 | { | |
3751 | struct virtnet_info *vi = netdev_priv(dev); | |
3752 | ||
3753 | vi->speed = SPEED_UNKNOWN; | |
3754 | vi->duplex = DUPLEX_UNKNOWN; | |
3755 | } | |
3756 | ||
faa9b39f JB |
3757 | static void virtnet_update_settings(struct virtnet_info *vi) |
3758 | { | |
3759 | u32 speed; | |
3760 | u8 duplex; | |
3761 | ||
3762 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) | |
3763 | return; | |
3764 | ||
64ffa39d MT |
3765 | virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); |
3766 | ||
faa9b39f JB |
3767 | if (ethtool_validate_speed(speed)) |
3768 | vi->speed = speed; | |
64ffa39d MT |
3769 | |
3770 | virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); | |
3771 | ||
faa9b39f JB |
3772 | if (ethtool_validate_duplex(duplex)) |
3773 | vi->duplex = duplex; | |
3774 | } | |
3775 | ||
c7114b12 AM |
3776 | static u32 virtnet_get_rxfh_key_size(struct net_device *dev) |
3777 | { | |
3778 | return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size; | |
3779 | } | |
3780 | ||
3781 | static u32 virtnet_get_rxfh_indir_size(struct net_device *dev) | |
3782 | { | |
3783 | return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size; | |
3784 | } | |
3785 | ||
fb6e30a7 AZ |
3786 | static int virtnet_get_rxfh(struct net_device *dev, |
3787 | struct ethtool_rxfh_param *rxfh) | |
c7114b12 AM |
3788 | { |
3789 | struct virtnet_info *vi = netdev_priv(dev); | |
3790 | int i; | |
3791 | ||
fb6e30a7 | 3792 | if (rxfh->indir) { |
c7114b12 | 3793 | for (i = 0; i < vi->rss_indir_table_size; ++i) |
fb6e30a7 | 3794 | rxfh->indir[i] = vi->ctrl->rss.indirection_table[i]; |
c7114b12 AM |
3795 | } |
3796 | ||
fb6e30a7 AZ |
3797 | if (rxfh->key) |
3798 | memcpy(rxfh->key, vi->ctrl->rss.key, vi->rss_key_size); | |
c7114b12 | 3799 | |
fb6e30a7 | 3800 | rxfh->hfunc = ETH_RSS_HASH_TOP; |
c7114b12 AM |
3801 | |
3802 | return 0; | |
3803 | } | |
3804 | ||
fb6e30a7 AZ |
3805 | static int virtnet_set_rxfh(struct net_device *dev, |
3806 | struct ethtool_rxfh_param *rxfh, | |
3807 | struct netlink_ext_ack *extack) | |
c7114b12 AM |
3808 | { |
3809 | struct virtnet_info *vi = netdev_priv(dev); | |
3810 | int i; | |
3811 | ||
fb6e30a7 AZ |
3812 | if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && |
3813 | rxfh->hfunc != ETH_RSS_HASH_TOP) | |
c7114b12 AM |
3814 | return -EOPNOTSUPP; |
3815 | ||
fb6e30a7 | 3816 | if (rxfh->indir) { |
c7114b12 | 3817 | for (i = 0; i < vi->rss_indir_table_size; ++i) |
fb6e30a7 | 3818 | vi->ctrl->rss.indirection_table[i] = rxfh->indir[i]; |
c7114b12 | 3819 | } |
fb6e30a7 AZ |
3820 | if (rxfh->key) |
3821 | memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size); | |
c7114b12 AM |
3822 | |
3823 | virtnet_commit_rss_command(vi); | |
3824 | ||
3825 | return 0; | |
3826 | } | |
3827 | ||
3828 | static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) | |
3829 | { | |
3830 | struct virtnet_info *vi = netdev_priv(dev); | |
3831 | int rc = 0; | |
3832 | ||
3833 | switch (info->cmd) { | |
3834 | case ETHTOOL_GRXRINGS: | |
3835 | info->data = vi->curr_queue_pairs; | |
c1170820 AM |
3836 | break; |
3837 | case ETHTOOL_GRXFH: | |
3838 | virtnet_get_hashflow(vi, info); | |
3839 | break; | |
3840 | default: | |
3841 | rc = -EOPNOTSUPP; | |
3842 | } | |
3843 | ||
3844 | return rc; | |
3845 | } | |
3846 | ||
3847 | static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) | |
3848 | { | |
3849 | struct virtnet_info *vi = netdev_priv(dev); | |
3850 | int rc = 0; | |
3851 | ||
3852 | switch (info->cmd) { | |
3853 | case ETHTOOL_SRXFH: | |
3854 | if (!virtnet_set_hashflow(vi, info)) | |
3855 | rc = -EINVAL; | |
3856 | ||
c7114b12 AM |
3857 | break; |
3858 | default: | |
3859 | rc = -EOPNOTSUPP; | |
3860 | } | |
3861 | ||
3862 | return rc; | |
3863 | } | |
3864 | ||
0fc0b732 | 3865 | static const struct ethtool_ops virtnet_ethtool_ops = { |
699b045a | 3866 | .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | |
62087995 | 3867 | ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX, |
66846048 | 3868 | .get_drvinfo = virtnet_get_drvinfo, |
9f4d26d0 | 3869 | .get_link = ethtool_op_get_link, |
8f9f4668 | 3870 | .get_ringparam = virtnet_get_ringparam, |
a335b33f | 3871 | .set_ringparam = virtnet_set_ringparam, |
d7dfc5cf TM |
3872 | .get_strings = virtnet_get_strings, |
3873 | .get_sset_count = virtnet_get_sset_count, | |
3874 | .get_ethtool_stats = virtnet_get_ethtool_stats, | |
d73bcd2c JW |
3875 | .set_channels = virtnet_set_channels, |
3876 | .get_channels = virtnet_get_channels, | |
074c3582 | 3877 | .get_ts_info = ethtool_op_get_ts_info, |
ebb6b4b1 PR |
3878 | .get_link_ksettings = virtnet_get_link_ksettings, |
3879 | .set_link_ksettings = virtnet_set_link_ksettings, | |
0c465be1 JW |
3880 | .set_coalesce = virtnet_set_coalesce, |
3881 | .get_coalesce = virtnet_get_coalesce, | |
394bd877 GL |
3882 | .set_per_queue_coalesce = virtnet_set_per_queue_coalesce, |
3883 | .get_per_queue_coalesce = virtnet_get_per_queue_coalesce, | |
c7114b12 AM |
3884 | .get_rxfh_key_size = virtnet_get_rxfh_key_size, |
3885 | .get_rxfh_indir_size = virtnet_get_rxfh_indir_size, | |
3886 | .get_rxfh = virtnet_get_rxfh, | |
3887 | .set_rxfh = virtnet_set_rxfh, | |
3888 | .get_rxnfc = virtnet_get_rxnfc, | |
c1170820 | 3889 | .set_rxnfc = virtnet_set_rxnfc, |
a9ea3fc6 HX |
3890 | }; |
3891 | ||
9fe7bfce JF |
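/* Suspend path: quiesce the config and rx-mode workers, detach the
 * netdev under the tx lock so the stack stops queueing packets, then
 * close the queues.  virtnet_restore_up() undoes this in reverse.
 */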
3892 | static void virtnet_freeze_down(struct virtio_device *vdev) |
3893 | { | |
3894 | struct virtnet_info *vi = vdev->priv; | |
9fe7bfce JF |
3895 | |
3896 | /* Make sure no work handler is accessing the device */ | |
3897 | flush_work(&vi->config_work); | |
b9f74252 JW |
3898 | disable_rx_mode_work(vi); |
3899 | flush_work(&vi->rx_mode_work); | |
9fe7bfce | 3900 | |
05c998b7 | 3901 | netif_tx_lock_bh(vi->dev); |
9fe7bfce | 3902 | netif_device_detach(vi->dev); |
05c998b7 | 3903 | netif_tx_unlock_bh(vi->dev); |
8af52fe9 SG |
3904 | if (netif_running(vi->dev)) |
3905 | virtnet_close(vi->dev); | |
9fe7bfce JF |
3906 | } |
3907 | ||
3908 | static int init_vqs(struct virtnet_info *vi); | |
3909 | ||
3910 | static int virtnet_restore_up(struct virtio_device *vdev) | |
3911 | { | |
3912 | struct virtnet_info *vi = vdev->priv; | |
8af52fe9 | 3913 | int err; |
9fe7bfce JF |
3914 | |
3915 | err = init_vqs(vi); | |
3916 | if (err) | |
3917 | return err; | |
3918 | ||
3919 | virtio_device_ready(vdev); | |
3920 | ||
5a159128 | 3921 | enable_delayed_refill(vi); |
b9f74252 | 3922 | enable_rx_mode_work(vi); |
5a159128 | 3923 | |
9fe7bfce | 3924 | if (netif_running(vi->dev)) { |
8af52fe9 SG |
3925 | err = virtnet_open(vi->dev); |
3926 | if (err) | |
3927 | return err; | |
9fe7bfce JF |
3928 | } |
3929 | ||
05c998b7 | 3930 | netif_tx_lock_bh(vi->dev); |
9fe7bfce | 3931 | netif_device_attach(vi->dev); |
05c998b7 | 3932 | netif_tx_unlock_bh(vi->dev); |
9fe7bfce JF |
3933 | return err; |
3934 | } | |
3935 | ||
3f93522f JW |
3936 | static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) |
3937 | { | |
3938 | struct scatterlist sg; | |
12e57169 | 3939 | vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads); |
3f93522f | 3940 | |
12e57169 | 3941 | sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); |
3f93522f JW |
3942 | |
3943 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, | |
3944 | VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { | |
7934b481 | 3945 | dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n"); | 
3f93522f JW |
3946 | return -EINVAL; |
3947 | } | |
3948 | ||
3949 | return 0; | |
3950 | } | |
3951 | ||
3952 | static int virtnet_clear_guest_offloads(struct virtnet_info *vi) | |
3953 | { | |
3954 | u64 offloads = 0; | |
3955 | ||
3956 | if (!vi->guest_offloads) | |
3957 | return 0; | |
3958 | ||
3f93522f JW |
3959 | return virtnet_set_guest_offloads(vi, offloads); |
3960 | } | |
3961 | ||
3962 | static int virtnet_restore_guest_offloads(struct virtnet_info *vi) | |
3963 | { | |
3964 | u64 offloads = vi->guest_offloads; | |
3965 | ||
3966 | if (!vi->guest_offloads) | |
3967 | return 0; | |
3f93522f JW |
3968 | |
3969 | return virtnet_set_guest_offloads(vi, offloads); | |
3970 | } | |
3971 | ||
9861ce03 JK |
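/* Attaching an XDP program reserves one extra TX queue per possible
 * CPU for XDP_TX (xdp_qp = nr_cpu_ids below) so the XDP transmit path
 * need not contend with the regular stack; if the device cannot supply
 * that many queues, the driver falls back to a slower mode in which
 * the XDP TX queues are shared and locked.
 */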
3972 | static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, |
3973 | struct netlink_ext_ack *extack) | |
f600b690 | 3974 | { |
e814b958 HQ |
3975 | unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM + |
3976 | sizeof(struct skb_shared_info)); | |
3977 | unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN; | |
f600b690 JF |
3978 | struct virtnet_info *vi = netdev_priv(dev); |
3979 | struct bpf_prog *old_prog; | |
017b29c3 | 3980 | u16 xdp_qp = 0, curr_qp; |
672aafd5 | 3981 | int i, err; |
f600b690 | 3982 | |
3f93522f JW |
3983 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
3984 | && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || | |
3985 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || | |
3986 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || | |
18ba58e1 | 3987 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || |
418044e1 AM |
3988 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) || |
3989 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) || | |
3990 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) { | |
dbcf24d1 | 3991 | NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first"); |
f600b690 JF |
3992 | return -EOPNOTSUPP; |
3993 | } | |
3994 | ||
3995 | if (vi->mergeable_rx_bufs && !vi->any_header_sg) { | |
4d463c4d | 3996 | NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required"); |
f600b690 JF |
3997 | return -EINVAL; |
3998 | } | |
3999 | ||
8d9bc36d HQ |
4000 | if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) { |
4001 | NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags"); | |
4002 | netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz); | |
f600b690 JF |
4003 | return -EINVAL; |
4004 | } | |
4005 | ||
672aafd5 JF |
4006 | curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; |
4007 | if (prog) | |
4008 | xdp_qp = nr_cpu_ids; | |
4009 | ||
4010 | /* XDP requires extra queues for XDP_TX */ | |
4011 | if (curr_qp + xdp_qp > vi->max_queue_pairs) { | |
9ce4e3d6 XZ |
4012 | netdev_warn_once(dev, "XDP requests %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n", | 
4013 | curr_qp + xdp_qp, vi->max_queue_pairs); | |
97c2c69e | 4014 | xdp_qp = 0; |
672aafd5 JF |
4015 | } |
4016 | ||
03aa6d34 TM |
4017 | old_prog = rtnl_dereference(vi->rq[0].xdp_prog); |
4018 | if (!prog && !old_prog) | |
4019 | return 0; | |
4020 | ||
85192dbf AN |
4021 | if (prog) |
4022 | bpf_prog_add(prog, vi->max_queue_pairs - 1); | |
2de2f7f4 | 4023 | |
4941d472 | 4024 | /* Make sure NAPI is not using any XDP TX queues for RX. */ |
534da5e8 TM |
4025 | if (netif_running(dev)) { |
4026 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
4e09ff53 | 4027 | napi_disable(&vi->rq[i].napi); |
534da5e8 TM |
4028 | virtnet_napi_tx_disable(&vi->sq[i].napi); |
4029 | } | |
4030 | } | |
f600b690 | 4031 | |
03aa6d34 TM |
4032 | if (!prog) { |
4033 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
4034 | rcu_assign_pointer(vi->rq[i].xdp_prog, prog); | |
4035 | if (i == 0) | |
4036 | virtnet_restore_guest_offloads(vi); | |
4037 | } | |
4038 | synchronize_net(); | |
4039 | } | |
f600b690 | 4040 | |
4941d472 JW |
4041 | err = _virtnet_set_queues(vi, curr_qp + xdp_qp); |
4042 | if (err) | |
4043 | goto err; | |
188313c1 | 4044 | netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); |
4941d472 | 4045 | vi->xdp_queue_pairs = xdp_qp; |
672aafd5 | 4046 | |
03aa6d34 | 4047 | if (prog) { |
97c2c69e | 4048 | vi->xdp_enabled = true; |
03aa6d34 TM |
4049 | for (i = 0; i < vi->max_queue_pairs; i++) { |
4050 | rcu_assign_pointer(vi->rq[i].xdp_prog, prog); | |
4051 | if (i == 0 && !old_prog) | |
3f93522f | 4052 | virtnet_clear_guest_offloads(vi); |
3f93522f | 4053 | } |
66c0e13a | 4054 | if (!old_prog) |
30bbf891 | 4055 | xdp_features_set_redirect_target(dev, true); |
97c2c69e | 4056 | } else { |
66c0e13a | 4057 | xdp_features_clear_redirect_target(dev); |
97c2c69e | 4058 | vi->xdp_enabled = false; |
03aa6d34 TM |
4059 | } |
4060 | ||
4061 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
f600b690 JF |
4062 | if (old_prog) |
4063 | bpf_prog_put(old_prog); | |
534da5e8 | 4064 | if (netif_running(dev)) { |
4e09ff53 | 4065 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
534da5e8 TM |
4066 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, |
4067 | &vi->sq[i].napi); | |
4068 | } | |
f600b690 JF |
4069 | } |
4070 | ||
4071 | return 0; | |
2de2f7f4 | 4072 | |
4941d472 | 4073 | err: |
03aa6d34 TM |
4074 | if (!prog) { |
4075 | virtnet_clear_guest_offloads(vi); | |
4076 | for (i = 0; i < vi->max_queue_pairs; i++) | |
4077 | rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); | |
4078 | } | |
4079 | ||
8be4d9a4 | 4080 | if (netif_running(dev)) { |
534da5e8 | 4081 | for (i = 0; i < vi->max_queue_pairs; i++) { |
8be4d9a4 | 4082 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
534da5e8 TM |
4083 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, |
4084 | &vi->sq[i].napi); | |
4085 | } | |
8be4d9a4 | 4086 | } |
2de2f7f4 JF |
4087 | if (prog) |
4088 | bpf_prog_sub(prog, vi->max_queue_pairs - 1); | |
4089 | return err; | |
f600b690 JF |
4090 | } |
4091 | ||
f4e63525 | 4092 | static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) |
f600b690 JF |
4093 | { |
4094 | switch (xdp->command) { | |
4095 | case XDP_SETUP_PROG: | |
9861ce03 | 4096 | return virtnet_xdp_set(dev, xdp->prog, xdp->extack); |
f600b690 JF |
4097 | default: |
4098 | return -EINVAL; | |
4099 | } | |
4100 | } | |
4101 | ||
ba5e4426 SS |
4102 | static int virtnet_get_phys_port_name(struct net_device *dev, char *buf, |
4103 | size_t len) | |
4104 | { | |
4105 | struct virtnet_info *vi = netdev_priv(dev); | |
4106 | int ret; | |
4107 | ||
4108 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) | |
4109 | return -EOPNOTSUPP; | |
4110 | ||
4111 | ret = snprintf(buf, len, "sby"); | |
4112 | if (ret >= len) | |
4113 | return -EOPNOTSUPP; | |
4114 | ||
4115 | return 0; | |
4116 | } | |
4117 | ||
a02e8964 WB |
4118 | static int virtnet_set_features(struct net_device *dev, |
4119 | netdev_features_t features) | |
4120 | { | |
4121 | struct virtnet_info *vi = netdev_priv(dev); | |
cf8691cb | 4122 | u64 offloads; |
a02e8964 WB |
4123 | int err; |
4124 | ||
dbcf24d1 | 4125 | if ((dev->features ^ features) & NETIF_F_GRO_HW) { |
97c2c69e | 4126 | if (vi->xdp_enabled) |
cf8691cb MT |
4127 | return -EBUSY; |
4128 | ||
dbcf24d1 | 4129 | if (features & NETIF_F_GRO_HW) |
cf8691cb | 4130 | offloads = vi->guest_offloads_capable; |
a02e8964 | 4131 | else |
cf8691cb | 4132 | offloads = vi->guest_offloads_capable & |
dbcf24d1 | 4133 | ~GUEST_OFFLOAD_GRO_HW_MASK; |
a02e8964 | 4134 | |
cf8691cb MT |
4135 | err = virtnet_set_guest_offloads(vi, offloads); |
4136 | if (err) | |
4137 | return err; | |
4138 | vi->guest_offloads = offloads; | |
a02e8964 WB |
4139 | } |
4140 | ||
c7114b12 AM |
4141 | if ((dev->features ^ features) & NETIF_F_RXHASH) { |
4142 | if (features & NETIF_F_RXHASH) | |
c1170820 | 4143 | vi->ctrl->rss.hash_types = vi->rss_hash_types_saved; |
c7114b12 AM |
4144 | else |
4145 | vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE; | |
4146 | ||
4147 | if (!virtnet_commit_rss_command(vi)) | |
4148 | return -EINVAL; | |
4149 | } | |
4150 | ||
a02e8964 WB |
4151 | return 0; |
4152 | } | |
4153 | ||
a520794b TL |
4154 | static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue) |
4155 | { | |
4156 | struct virtnet_info *priv = netdev_priv(dev); | |
4157 | struct send_queue *sq = &priv->sq[txqueue]; | |
4158 | struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue); | |
4159 | ||
4160 | u64_stats_update_begin(&sq->stats.syncp); | |
61217d8f | 4161 | u64_stats_inc(&sq->stats.tx_timeouts); |
a520794b TL |
4162 | u64_stats_update_end(&sq->stats.syncp); |
4163 | ||
4164 | netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n", | |
4165 | txqueue, sq->name, sq->vq->index, sq->vq->name, | |
5337824f | 4166 | jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start))); |
a520794b TL |
4167 | } |
4168 | ||
76288b4e SH |
4169 | static const struct net_device_ops virtnet_netdev = { |
4170 | .ndo_open = virtnet_open, | |
4171 | .ndo_stop = virtnet_close, | |
4172 | .ndo_start_xmit = start_xmit, | |
4173 | .ndo_validate_addr = eth_validate_addr, | |
9c46f6d4 | 4174 | .ndo_set_mac_address = virtnet_set_mac_address, |
2af7698e | 4175 | .ndo_set_rx_mode = virtnet_set_rx_mode, |
3fa2a1df | 4176 | .ndo_get_stats64 = virtnet_stats, |
1824a989 AW |
4177 | .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, |
4178 | .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, | |
f4e63525 | 4179 | .ndo_bpf = virtnet_xdp, |
186b3c99 | 4180 | .ndo_xdp_xmit = virtnet_xdp_xmit, |
2836b4f2 | 4181 | .ndo_features_check = passthru_features_check, |
ba5e4426 | 4182 | .ndo_get_phys_port_name = virtnet_get_phys_port_name, |
a02e8964 | 4183 | .ndo_set_features = virtnet_set_features, |
a520794b | 4184 | .ndo_tx_timeout = virtnet_tx_timeout, |
76288b4e SH |
4185 | }; |
4186 | ||
586d17c5 | 4187 | static void virtnet_config_changed_work(struct work_struct *work) |
9f4d26d0 | 4188 | { |
586d17c5 JW |
4189 | struct virtnet_info *vi = |
4190 | container_of(work, struct virtnet_info, config_work); | |
9f4d26d0 MM |
4191 | u16 v; |
4192 | ||
855e0c52 RR |
4193 | if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, |
4194 | struct virtio_net_config, status, &v) < 0) | |
507613bf | 4195 | return; |
586d17c5 JW |
4196 | |
4197 | if (v & VIRTIO_NET_S_ANNOUNCE) { | |
ee89bab1 | 4198 | netdev_notify_peers(vi->dev); |
586d17c5 JW |
4199 | virtnet_ack_link_announce(vi); |
4200 | } | |
9f4d26d0 MM |
4201 | |
4202 | /* Ignore unknown (future) status bits */ | |
4203 | v &= VIRTIO_NET_S_LINK_UP; | |
4204 | ||
4205 | if (vi->status == v) | |
507613bf | 4206 | return; |
9f4d26d0 MM |
4207 | |
4208 | vi->status = v; | |
4209 | ||
4210 | if (vi->status & VIRTIO_NET_S_LINK_UP) { | |
faa9b39f | 4211 | virtnet_update_settings(vi); |
9f4d26d0 | 4212 | netif_carrier_on(vi->dev); |
986a4f4d | 4213 | netif_tx_wake_all_queues(vi->dev); |
9f4d26d0 MM |
4214 | } else { |
4215 | netif_carrier_off(vi->dev); | |
986a4f4d | 4216 | netif_tx_stop_all_queues(vi->dev); |
9f4d26d0 MM |
4217 | } |
4218 | } | |
4219 | ||
4220 | static void virtnet_config_changed(struct virtio_device *vdev) | |
4221 | { | |
4222 | struct virtnet_info *vi = vdev->priv; | |
4223 | ||
3b07e9ca | 4224 | schedule_work(&vi->config_work); |
9f4d26d0 MM |
4225 | } |
4226 | ||
986a4f4d JW |
4227 | static void virtnet_free_queues(struct virtnet_info *vi) |
4228 | { | |
d4fb84ee AV |
4229 | int i; |
4230 | ||
ab3971b1 | 4231 | for (i = 0; i < vi->max_queue_pairs; i++) { |
5198d545 JK |
4232 | __netif_napi_del(&vi->rq[i].napi); |
4233 | __netif_napi_del(&vi->sq[i].napi); | |
ab3971b1 | 4234 | } |
d4fb84ee | 4235 | |
5198d545 | 4236 | /* We called __netif_napi_del(), |
963abe5c ED |
4237 | * we need to respect an RCU grace period before freeing vi->rq |
4238 | */ | |
4239 | synchronize_net(); | |
4240 | ||
986a4f4d JW |
4241 | kfree(vi->rq); |
4242 | kfree(vi->sq); | |
12e57169 | 4243 | kfree(vi->ctrl); |
986a4f4d JW |
4244 | } |
4245 | ||
47315329 | 4246 | static void _free_receive_bufs(struct virtnet_info *vi) |
986a4f4d | 4247 | { |
f600b690 | 4248 | struct bpf_prog *old_prog; |
986a4f4d JW |
4249 | int i; |
4250 | ||
4251 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
4252 | while (vi->rq[i].pages) | |
4253 | __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); | |
f600b690 JF |
4254 | |
4255 | old_prog = rtnl_dereference(vi->rq[i].xdp_prog); | |
4256 | RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); | |
4257 | if (old_prog) | |
4258 | bpf_prog_put(old_prog); | |
986a4f4d | 4259 | } |
47315329 JF |
4260 | } |
4261 | ||
4262 | static void free_receive_bufs(struct virtnet_info *vi) | |
4263 | { | |
4264 | rtnl_lock(); | |
4265 | _free_receive_bufs(vi); | |
f600b690 | 4266 | rtnl_unlock(); |
986a4f4d JW |
4267 | } |
4268 | ||
fb51879d MD |
4269 | static void free_receive_page_frags(struct virtnet_info *vi) |
4270 | { | |
4271 | int i; | |
4272 | for (i = 0; i < vi->max_queue_pairs; i++) | |
295525e2 XZ |
4273 | if (vi->rq[i].alloc_frag.page) { |
4274 | if (vi->rq[i].do_dma && vi->rq[i].last_dma) | |
4275 | virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0); | |
fb51879d | 4276 | put_page(vi->rq[i].alloc_frag.page); |
295525e2 | 4277 | } |
fb51879d MD |
4278 | } |
4279 | ||
6e345f8c XZ |
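/* Buffers pending on a send queue are tagged pointers: the low bit
 * records whether the cookie is an xdp_frame or an sk_buff (this is
 * what is_xdp_frame()/ptr_to_xdp() decode), so a leftover buffer can
 * be returned to the matching free routine here.
 */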
4280 | static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf) |
4281 | { | |
4282 | if (!is_xdp_frame(buf)) | |
4283 | dev_kfree_skb(buf); | |
4284 | else | |
4285 | xdp_return_frame(ptr_to_xdp(buf)); | |
4286 | } | |
4287 | ||
986a4f4d JW |
4288 | static void free_unused_bufs(struct virtnet_info *vi) |
4289 | { | |
4290 | void *buf; | |
4291 | int i; | |
4292 | ||
4293 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
4294 | struct virtqueue *vq = vi->sq[i].vq; | |
6e345f8c XZ |
4295 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) |
4296 | virtnet_sq_free_unused_buf(vq, buf); | |
f8bb5104 | 4297 | cond_resched(); |
986a4f4d JW |
4298 | } |
4299 | ||
4300 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
2311e06b | 4301 | struct virtqueue *vq = vi->rq[i].vq; |
295525e2 | 4302 | |
2311e06b XZ |
4303 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) |
4304 | virtnet_rq_unmap_free_buf(vq, buf); | |
f8bb5104 | 4305 | cond_resched(); |
986a4f4d JW |
4306 | } |
4307 | } | |
4308 | ||
e9d7417b JW |
4309 | static void virtnet_del_vqs(struct virtnet_info *vi) |
4310 | { | |
4311 | struct virtio_device *vdev = vi->vdev; | |
4312 | ||
310974fa | 4313 | virtnet_clean_affinity(vi); |
986a4f4d | 4314 | |
e9d7417b | 4315 | vdev->config->del_vqs(vdev); |
986a4f4d JW |
4316 | |
4317 | virtnet_free_queues(vi); | |
e9d7417b JW |
4318 | } |
4319 | ||
d85b758f MT |
4320 | /* How large should a single buffer be so a queue full of these can fit at |
4321 | * least one full packet? | |
4322 | * Logic below assumes the mergeable buffer header is used. | |
4323 | */ | |
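/* Worked example: with a 16-entry ring and a 65535-byte max packet,
 * buf_len = 12 + 14 + 4 + 65535 = 65565 and DIV_ROUND_UP(65565, 16) =
 * 4098, so each buffer must carry at least 4098 - 12 = 4086 bytes of
 * data (the 12-byte mergeable header is excluded from the result).
 */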
4324 | static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) | |
4325 | { | |
c1ddc42d | 4326 | const unsigned int hdr_len = vi->hdr_len; |
d85b758f MT |
4327 | unsigned int rq_size = virtqueue_get_vring_size(vq); |
4328 | unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; | |
4329 | unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; | |
4330 | unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); | |
4331 | ||
f0c3192c MT |
4332 | return max(max(min_buf_len, hdr_len) - hdr_len, |
4333 | (unsigned int)GOOD_PACKET_LEN); | |
d85b758f MT |
4334 | } |
4335 | ||
986a4f4d | 4336 | static int virtnet_find_vqs(struct virtnet_info *vi) |
3f9c10b0 | 4337 | { |
986a4f4d JW |
4338 | vq_callback_t **callbacks; |
4339 | struct virtqueue **vqs; | |
986a4f4d | 4340 | const char **names; |
e3fe8d28 ZY |
4341 | int ret = -ENOMEM; |
4342 | int total_vqs; | |
d45b897b | 4343 | bool *ctx; |
e3fe8d28 | 4344 | u16 i; |
986a4f4d JW |
4345 | |
4346 | /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by | |
4347 | * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by | |
4348 | * possible control vq. | |
4349 | */ | |
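/* E.g. with 4 queue pairs and a control virtqueue, total_vqs is 9 and
 * the layout is rx0, tx0, rx1, tx1, rx2, tx2, rx3, tx3, ctrl.
 */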
4350 | total_vqs = vi->max_queue_pairs * 2 + | |
4351 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); | |
4352 | ||
4353 | /* Allocate space for find_vqs parameters */ | |
6396bb22 | 4354 | vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL); |
986a4f4d JW |
4355 | if (!vqs) |
4356 | goto err_vq; | |
6da2ec56 | 4357 | callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL); |
986a4f4d JW |
4358 | if (!callbacks) |
4359 | goto err_callback; | |
6da2ec56 | 4360 | names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL); |
986a4f4d JW |
4361 | if (!names) |
4362 | goto err_names; | |
192f68cf | 4363 | if (!vi->big_packets || vi->mergeable_rx_bufs) { |
6396bb22 | 4364 | ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL); |
d45b897b MT |
4365 | if (!ctx) |
4366 | goto err_ctx; | |
4367 | } else { | |
4368 | ctx = NULL; | |
4369 | } | |
986a4f4d JW |
4370 | |
4371 | /* Parameters for control virtqueue, if any */ | |
4372 | if (vi->has_cvq) { | |
4373 | callbacks[total_vqs - 1] = NULL; | |
4374 | names[total_vqs - 1] = "control"; | |
4375 | } | |
3f9c10b0 | 4376 | |
986a4f4d JW |
4377 | /* Allocate/initialize parameters for send/receive virtqueues */ |
4378 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
4379 | callbacks[rxq2vq(i)] = skb_recv_done; | |
4380 | callbacks[txq2vq(i)] = skb_xmit_done; | |
e3fe8d28 ZY |
4381 | sprintf(vi->rq[i].name, "input.%u", i); |
4382 | sprintf(vi->sq[i].name, "output.%u", i); | |
986a4f4d JW |
4383 | names[rxq2vq(i)] = vi->rq[i].name; |
4384 | names[txq2vq(i)] = vi->sq[i].name; | |
d45b897b MT |
4385 | if (ctx) |
4386 | ctx[rxq2vq(i)] = true; | |
986a4f4d | 4387 | } |
3f9c10b0 | 4388 | |
2e9ca760 MT |
4389 | ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks, |
4390 | names, ctx, NULL); | |
986a4f4d JW |
4391 | if (ret) |
4392 | goto err_find; | |
3f9c10b0 | 4393 | |
986a4f4d JW |
4394 | if (vi->has_cvq) { |
4395 | vi->cvq = vqs[total_vqs - 1]; | |
3f9c10b0 | 4396 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) |
f646968f | 4397 | vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
3f9c10b0 | 4398 | } |
986a4f4d JW |
4399 | |
4400 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
4401 | vi->rq[i].vq = vqs[rxq2vq(i)]; | |
d85b758f | 4402 | vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); |
986a4f4d JW |
4403 | vi->sq[i].vq = vqs[txq2vq(i)]; |
4404 | } | |
4405 | ||
2fa3c8a8 | 4406 | /* Success: ret == 0; fall through to free the temporary arrays. */ |
986a4f4d | 4407 | |
986a4f4d JW |
4408 | |
4409 | err_find: | |
d45b897b MT |
4410 | kfree(ctx); |
4411 | err_ctx: | |
986a4f4d JW |
4412 | kfree(names); |
4413 | err_names: | |
4414 | kfree(callbacks); | |
4415 | err_callback: | |
4416 | kfree(vqs); | |
4417 | err_vq: | |
4418 | return ret; | |
4419 | } | |
4420 | ||
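/* Example of the virtqueue layout the function above builds for
 * max_queue_pairs = 2 plus a control queue, assuming the usual helper
 * mapping rxq2vq(i) = 2 * i and txq2vq(i) = 2 * i + 1:
 *
 *   vqs[0] = rx0,  vqs[1] = tx0,
 *   vqs[2] = rx1,  vqs[3] = tx1,
 *   vqs[4] = control  (total_vqs - 1)
 *
 * ctx[] is set only at the RX indices, so only receive buffers carry
 * per-buffer context pointers.
 */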
4421 | static int virtnet_alloc_queues(struct virtnet_info *vi) | |
4422 | { | |
4423 | int i; | |
4424 | ||
122b84a1 MG |
4425 | if (vi->has_cvq) { |
4426 | vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); | |
4427 | if (!vi->ctrl) | |
4428 | goto err_ctrl; | |
4429 | } else { | |
4430 | vi->ctrl = NULL; | |
4431 | } | |
6396bb22 | 4432 | vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); |
986a4f4d JW |
4433 | if (!vi->sq) |
4434 | goto err_sq; | |
6396bb22 | 4435 | vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL); |
008d4278 | 4436 | if (!vi->rq) |
986a4f4d JW |
4437 | goto err_rq; |
4438 | ||
4439 | INIT_DELAYED_WORK(&vi->refill, refill_work); | |
4440 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
4441 | vi->rq[i].pages = NULL; | |
d484735d JK |
4442 | netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll, |
4443 | napi_weight); | |
8d602e1a JK |
4444 | netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi, |
4445 | virtnet_poll_tx, | |
4446 | napi_tx ? napi_weight : 0); | |
986a4f4d | 4447 | |
62087995 HQ |
4448 | INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work); |
4449 | vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; | |
4450 | ||
986a4f4d | 4451 | sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); |
5377d758 | 4452 | ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); |
986a4f4d | 4453 | sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); |
d7dfc5cf TM |
4454 | |
4455 | u64_stats_init(&vi->rq[i].stats.syncp); | |
4456 | u64_stats_init(&vi->sq[i].stats.syncp); | |
986a4f4d JW |
4457 | } |
4458 | ||
4459 | return 0; | |
4460 | ||
4461 | err_rq: | |
4462 | kfree(vi->sq); | |
4463 | err_sq: | |
12e57169 MT |
4464 | kfree(vi->ctrl); |
4465 | err_ctrl: | |
986a4f4d JW |
4466 | return -ENOMEM; |
4467 | } | |
4468 | ||
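/* The u64_stats_init() calls above arm the per-queue seqcounts that
 * readers later pair with u64_stats_fetch_begin()/u64_stats_fetch_retry().
 * A minimal reader sketch (illustrative only; the packets field name
 * follows this driver's virtnet_rq_stats):
 *
 *   unsigned int start;
 *   u64 packets;
 *
 *   do {
 *           start   = u64_stats_fetch_begin(&vi->rq[i].stats.syncp);
 *           packets = u64_stats_read(&vi->rq[i].stats.packets);
 *   } while (u64_stats_fetch_retry(&vi->rq[i].stats.syncp, start));
 */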
4469 | static int init_vqs(struct virtnet_info *vi) | |
4470 | { | |
4471 | int ret; | |
4472 | ||
4473 | /* Allocate send & receive queues */ | |
4474 | ret = virtnet_alloc_queues(vi); | |
4475 | if (ret) | |
4476 | goto err; | |
4477 | ||
4478 | ret = virtnet_find_vqs(vi); | |
4479 | if (ret) | |
4480 | goto err_free; | |
4481 | ||
295525e2 XZ |
4482 | virtnet_rq_set_premapped(vi); |
4483 | ||
a0d1d0f4 | 4484 | cpus_read_lock(); |
8898c21c | 4485 | virtnet_set_affinity(vi); |
a0d1d0f4 | 4486 | cpus_read_unlock(); |
47be2479 | 4487 | |
986a4f4d JW |
4488 | return 0; |
4489 | ||
4490 | err_free: | |
4491 | virtnet_free_queues(vi); | |
4492 | err: | |
4493 | return ret; | |
3f9c10b0 AS |
4494 | } |
4495 | ||
fbf28d78 MD |
4496 | #ifdef CONFIG_SYSFS |
4497 | static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, | |
718ad681 | 4498 | char *buf) |
fbf28d78 MD |
4499 | { |
4500 | struct virtnet_info *vi = netdev_priv(queue->dev); | |
4501 | unsigned int queue_index = get_netdev_rx_queue_index(queue); | |
3cc81a9a JW |
4502 | unsigned int headroom = virtnet_get_headroom(vi); |
4503 | unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; | |
5377d758 | 4504 | struct ewma_pkt_len *avg; |
fbf28d78 MD |
4505 | |
4506 | BUG_ON(queue_index >= vi->max_queue_pairs); | |
4507 | avg = &vi->rq[queue_index].mrg_avg_pkt_len; | |
d85b758f | 4508 | return sprintf(buf, "%u\n", |
3cc81a9a JW |
4509 | get_mergeable_buf_len(&vi->rq[queue_index], avg, |
4510 | SKB_DATA_ALIGN(headroom + tailroom))); | |
fbf28d78 MD |
4511 | } |
4512 | ||
4513 | static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = | |
4514 | __ATTR_RO(mergeable_rx_buffer_size); | |
4515 | ||
4516 | static struct attribute *virtio_net_mrg_rx_attrs[] = { | |
4517 | &mergeable_rx_buffer_size_attribute.attr, | |
4518 | NULL | |
4519 | }; | |
4520 | ||
4521 | static const struct attribute_group virtio_net_mrg_rx_group = { | |
4522 | .name = "virtio_net", | |
4523 | .attrs = virtio_net_mrg_rx_attrs | |
4524 | }; | |
4525 | #endif | |
4526 | ||
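/* With mergeable buffers enabled, the attribute group above shows up under
 * every RX queue; a usage sketch (interface name and value are assumptions):
 *
 *   $ cat /sys/class/net/eth0/queues/rx-0/virtio_net/mergeable_rx_buffer_size
 *   1536
 *
 * The reported number is the EWMA-based buffer length, in bytes, that the
 * driver would currently use when refilling that ring.
 */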
892d6eb1 JW |
4527 | static bool virtnet_fail_on_feature(struct virtio_device *vdev, |
4528 | unsigned int fbit, | |
4529 | const char *fname, const char *dname) | |
4530 | { | |
4531 | if (!virtio_has_feature(vdev, fbit)) | |
4532 | return false; | |
4533 | ||
4534 | dev_err(&vdev->dev, "device advertises feature %s but not %s", | |
4535 | fname, dname); | |
4536 | ||
4537 | return true; | |
4538 | } | |
4539 | ||
4540 | #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ | |
4541 | virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) | |
4542 | ||
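/* The #fbit stringification means, for example, that
 *
 *   VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ")
 *
 * expands to
 *
 *   virtnet_fail_on_feature(vdev, VIRTIO_NET_F_MQ,
 *                           "VIRTIO_NET_F_MQ", "VIRTIO_NET_F_CTRL_VQ")
 *
 * so the error message names both the offending feature and its missing
 * dependency without repeating either at the call site.
 */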
4543 | static bool virtnet_validate_features(struct virtio_device *vdev) | |
4544 | { | |
4545 | if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && | |
4546 | (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, | |
4547 | "VIRTIO_NET_F_CTRL_VQ") || | |
4548 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, | |
4549 | "VIRTIO_NET_F_CTRL_VQ") || | |
4550 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, | |
4551 | "VIRTIO_NET_F_CTRL_VQ") || | |
4552 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || | |
4553 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, | |
c7114b12 AM |
4554 | "VIRTIO_NET_F_CTRL_VQ") || |
4555 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS, | |
91f41f01 AM |
4556 | "VIRTIO_NET_F_CTRL_VQ") || |
4557 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT, | |
699b045a AK |
4558 | "VIRTIO_NET_F_CTRL_VQ") || |
4559 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL, | |
8af3bf66 GL |
4560 | "VIRTIO_NET_F_CTRL_VQ") || |
4561 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL, | |
892d6eb1 JW |
4562 | "VIRTIO_NET_F_CTRL_VQ"))) { |
4563 | return false; | |
4564 | } | |
4565 | ||
4566 | return true; | |
4567 | } | |
4568 | ||
d0c2c997 JW |
4569 | #define MIN_MTU ETH_MIN_MTU |
4570 | #define MAX_MTU ETH_MAX_MTU | |
4571 | ||
fe36cbe0 | 4572 | static int virtnet_validate(struct virtio_device *vdev) |
296f96fc | 4573 | { |
6ba42248 MT |
4574 | if (!vdev->config->get) { |
4575 | dev_err(&vdev->dev, "%s failure: config access disabled\n", | |
4576 | __func__); | |
4577 | return -EINVAL; | |
4578 | } | |
4579 | ||
892d6eb1 JW |
4580 | if (!virtnet_validate_features(vdev)) |
4581 | return -EINVAL; | |
4582 | ||
fe36cbe0 MT |
4583 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { |
4584 | int mtu = virtio_cread16(vdev, | |
4585 | offsetof(struct virtio_net_config, | |
4586 | mtu)); | |
4587 | if (mtu < MIN_MTU) | |
4588 | __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); | |
4589 | } | |
4590 | ||
7c06458c LV |
4591 | if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) && |
4592 | !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { | |
4593 | dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby"); | |
4594 | __virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY); | |
4595 | } | |
4596 | ||
fe36cbe0 MT |
4597 | return 0; |
4598 | } | |
4599 | ||
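/* Example of the MTU clamp above: a device whose config space reports
 * mtu = 60 is below MIN_MTU (ETH_MIN_MTU, 68), so VIRTIO_NET_F_MTU is
 * cleared and the rest of the driver behaves as if the feature had never
 * been offered; probe itself is not failed.
 */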
46cd26f4 GL |
4600 | static bool virtnet_check_guest_gso(const struct virtnet_info *vi) |
4601 | { | |
4602 | return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || | |
4603 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || | |
4604 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || | |
418044e1 AM |
4605 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || |
4606 | (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) && | |
4607 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6)); | |
46cd26f4 GL |
4608 | } |
4609 | ||
4959aebb GL |
4610 | static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu) |
4611 | { | |
4612 | bool guest_gso = virtnet_check_guest_gso(vi); | |
4613 | ||
4614 | /* If the device can receive ANY guest GSO packets, regardless of mtu, | |
4615 | * allocate buffers of maximum size; otherwise limit them to an | |
4616 | * mtu's worth of data only. | |
4617 | */ | |
4618 | if (mtu > ETH_DATA_LEN || guest_gso) { | |
4619 | vi->big_packets = true; | |
4620 | vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE); | |
4621 | } | |
4622 | } | |
4623 | ||
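/* Worked example (assuming PAGE_SIZE = 4096): a device offering mtu = 9000
 * with no guest GSO features gets
 *
 *   vi->big_packets              = true
 *   vi->big_packets_num_skbfrags = DIV_ROUND_UP(9000, 4096) = 3
 *
 * whereas any guest GSO capability forces MAX_SKB_FRAGS so that a full
 * 64K GSO frame always fits.
 */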
fe36cbe0 MT |
4624 | static int virtnet_probe(struct virtio_device *vdev) |
4625 | { | |
d7dfc5cf | 4626 | int i, err = -ENOMEM; |
fe36cbe0 MT |
4627 | struct net_device *dev; |
4628 | struct virtnet_info *vi; | |
4629 | u16 max_queue_pairs; | |
4959aebb | 4630 | int mtu = 0; |
fe36cbe0 | 4631 | |
c7114b12 AM |
4632 | /* Find out whether the host supports a multiqueue/rss virtio_net device */ |
4633 | max_queue_pairs = 1; | |
4634 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) | |
4635 | max_queue_pairs = | |
4636 | virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs)); | |
986a4f4d JW |
4637 | |
4638 | /* We need at least 2 queues */ |
c7114b12 | 4639 | if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || |
986a4f4d JW |
4640 | max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || |
4641 | !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) | |
4642 | max_queue_pairs = 1; | |
296f96fc RR |
4643 | |
4644 | /* Allocate ourselves a network device with room for our info */ | |
986a4f4d | 4645 | dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); |
296f96fc RR |
4646 | if (!dev) |
4647 | return -ENOMEM; | |
4648 | ||
4649 | /* Set up network device as normal. */ | |
ab5bd583 XZ |
4650 | dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE | |
4651 | IFF_TX_SKB_NO_LINEAR; | |
76288b4e | 4652 | dev->netdev_ops = &virtnet_netdev; |
296f96fc | 4653 | dev->features = NETIF_F_HIGHDMA; |
3fa2a1df | 4654 | |
7ad24ea4 | 4655 | dev->ethtool_ops = &virtnet_ethtool_ops; |
296f96fc RR |
4656 | SET_NETDEV_DEV(dev, &vdev->dev); |
4657 | ||
4658 | /* Do we support "hardware" checksums? */ | |
98e778c9 | 4659 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { |
296f96fc | 4660 | /* This opens up the world of extra features. */ |
48900cb6 | 4661 | dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; |
98e778c9 | 4662 | if (csum) |
48900cb6 | 4663 | dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; |
98e778c9 MM |
4664 | |
4665 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { | |
e078de03 | 4666 | dev->hw_features |= NETIF_F_TSO |
34a48579 RR |
4667 | | NETIF_F_TSO_ECN | NETIF_F_TSO6; |
4668 | } | |
5539ae96 | 4669 | /* Individual feature bits: what can host handle? */ |
98e778c9 MM |
4670 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) |
4671 | dev->hw_features |= NETIF_F_TSO; | |
4672 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) | |
4673 | dev->hw_features |= NETIF_F_TSO6; | |
4674 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) | |
4675 | dev->hw_features |= NETIF_F_TSO_ECN; | |
418044e1 AM |
4676 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO)) |
4677 | dev->hw_features |= NETIF_F_GSO_UDP_L4; | |
98e778c9 | 4678 | |
41f2f127 JW |
4679 | dev->features |= NETIF_F_GSO_ROBUST; |
4680 | ||
98e778c9 | 4681 | if (gso) |
e078de03 | 4682 | dev->features |= dev->hw_features & NETIF_F_ALL_TSO; |
98e778c9 | 4683 | /* (!csum && gso) case will be fixed by register_netdev() */ |
296f96fc | 4684 | } |
4f49129b TH |
4685 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) |
4686 | dev->features |= NETIF_F_RXCSUM; | |
a02e8964 WB |
4687 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || |
4688 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)) | |
dbcf24d1 | 4689 | dev->features |= NETIF_F_GRO_HW; |
cf8691cb | 4690 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) |
dbcf24d1 | 4691 | dev->hw_features |= NETIF_F_GRO_HW; |
296f96fc | 4692 | |
4fda8302 | 4693 | dev->vlan_features = dev->features; |
66c0e13a | 4694 | dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT; |
4fda8302 | 4695 | |
d0c2c997 JW |
4696 | /* MTU range: 68 - 65535 */ |
4697 | dev->min_mtu = MIN_MTU; | |
4698 | dev->max_mtu = MAX_MTU; | |
4699 | ||
296f96fc | 4700 | /* Configuration may specify what MAC to use. Otherwise random. */ |
f2edaa4a JK |
4701 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { |
4702 | u8 addr[ETH_ALEN]; | |
4703 | ||
855e0c52 RR |
4704 | virtio_cread_bytes(vdev, |
4705 | offsetof(struct virtio_net_config, mac), | |
f2edaa4a JK |
4706 | addr, ETH_ALEN); |
4707 | eth_hw_addr_set(dev, addr); | |
4708 | } else { | |
f2cedb63 | 4709 | eth_hw_addr_random(dev); |
9f62d221 LV |
4710 | dev_info(&vdev->dev, "Assigned random MAC address %pM\n", |
4711 | dev->dev_addr); | |
f2edaa4a | 4712 | } |
296f96fc RR |
4713 | |
4714 | /* Set up our device-specific information */ | |
4715 | vi = netdev_priv(dev); | |
296f96fc RR |
4716 | vi->dev = dev; |
4717 | vi->vdev = vdev; | |
d9d5dcc8 | 4718 | vdev->priv = vi; |
827da44c | 4719 | |
586d17c5 | 4720 | INIT_WORK(&vi->config_work, virtnet_config_changed_work); |
b9f74252 | 4721 | INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work); |
5a159128 | 4722 | spin_lock_init(&vi->refill_lock); |
296f96fc | 4723 | |
30bbf891 | 4724 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) { |
3f2c31d9 | 4725 | vi->mergeable_rx_bufs = true; |
30bbf891 LB |
4726 | dev->xdp_features |= NETDEV_XDP_ACT_RX_SG; |
4727 | } | |
3f2c31d9 | 4728 | |
91f41f01 AM |
4729 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT)) |
4730 | vi->has_rss_hash_report = true; | |
4731 | ||
4732 | if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) | |
c7114b12 | 4733 | vi->has_rss = true; |
91f41f01 AM |
4734 | |
4735 | if (vi->has_rss || vi->has_rss_hash_report) { | |
c7114b12 AM |
4736 | vi->rss_indir_table_size = |
4737 | virtio_cread16(vdev, offsetof(struct virtio_net_config, | |
4738 | rss_max_indirection_table_length)); | |
4739 | vi->rss_key_size = | |
4740 | virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size)); | |
4741 | ||
4742 | vi->rss_hash_types_supported = | |
4743 | virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types)); | |
4744 | vi->rss_hash_types_supported &= | |
4745 | ~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX | | |
4746 | VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | | |
4747 | VIRTIO_NET_RSS_HASH_TYPE_UDP_EX); | |
4748 | ||
4749 | dev->hw_features |= NETIF_F_RXHASH; | |
4750 | } | |
91f41f01 AM |
4751 | |
4752 | if (vi->has_rss_hash_report) | |
4753 | vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash); | |
4754 | else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) || | |
4755 | virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) | |
012873d0 MT |
4756 | vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
4757 | else | |
4758 | vi->hdr_len = sizeof(struct virtio_net_hdr); | |
4759 | ||
75993300 MT |
4760 | if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || |
4761 | virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) | |
e7428e95 MT |
4762 | vi->any_header_sg = true; |
4763 | ||
986a4f4d JW |
4764 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) |
4765 | vi->has_cvq = true; | |
4766 | ||
14de9d11 AC |
4767 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { |
4768 | mtu = virtio_cread16(vdev, | |
4769 | offsetof(struct virtio_net_config, | |
4770 | mtu)); | |
93a205ee | 4771 | if (mtu < dev->min_mtu) { |
fe36cbe0 MT |
4772 | /* Should never trigger: MTU was previously validated |
4773 | * in virtnet_validate. | |
4774 | */ | |
7934b481 YS |
4775 | dev_err(&vdev->dev, |
4776 | "device MTU appears to have changed it is now %d < %d", | |
4777 | mtu, dev->min_mtu); | |
411ea23a | 4778 | err = -EINVAL; |
d7dfc5cf | 4779 | goto free; |
93a205ee | 4780 | } |
2e123b44 | 4781 | |
fe36cbe0 MT |
4782 | dev->mtu = mtu; |
4783 | dev->max_mtu = mtu; | |
14de9d11 AC |
4784 | } |
4785 | ||
4959aebb GL |
4786 | virtnet_set_big_packets(vi, mtu); |
4787 | ||
012873d0 MT |
4788 | if (vi->any_header_sg) |
4789 | dev->needed_headroom = vi->hdr_len; | |
6ebbc1a6 | 4790 | |
44900010 JW |
4791 | /* Enable multiqueue by default */ |
4792 | if (num_online_cpus() >= max_queue_pairs) | |
4793 | vi->curr_queue_pairs = max_queue_pairs; | |
4794 | else | |
4795 | vi->curr_queue_pairs = num_online_cpus(); | |
986a4f4d JW |
4796 | vi->max_queue_pairs = max_queue_pairs; |
4797 | ||
4798 | /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ | |
3f9c10b0 | 4799 | err = init_vqs(vi); |
d2a7ddda | 4800 | if (err) |
d7dfc5cf | 4801 | goto free; |
296f96fc | 4802 | |
3014a0d5 HQ |
4803 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { |
4804 | vi->intr_coal_rx.max_usecs = 0; | |
4805 | vi->intr_coal_tx.max_usecs = 0; | |
4806 | vi->intr_coal_rx.max_packets = 0; | |
4807 | ||
4808 | /* Keep the default values of the coalescing parameters | |
4809 | * aligned with the default napi_tx state. | |
4810 | */ | |
4811 | if (vi->sq[0].napi.weight) | |
4812 | vi->intr_coal_tx.max_packets = 1; | |
4813 | else | |
4814 | vi->intr_coal_tx.max_packets = 0; | |
4815 | } | |
4816 | ||
4817 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { | |
4818 | /* Same reasoning as for VIRTIO_NET_F_NOTF_COAL above. */ | |
4819 | for (i = 0; i < vi->max_queue_pairs; i++) | |
4820 | if (vi->sq[i].napi.weight) | |
4821 | vi->sq[i].intr_coal.max_packets = 1; | |
4822 | } | |
4823 | ||
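/* These defaults are what ethtool's coalescing query would later report;
 * a sketch with tx napi enabled (interface name is an assumption):
 *
 *   $ ethtool -c eth0 | grep tx-frames:
 *   tx-frames: 1
 */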
fbf28d78 MD |
4824 | #ifdef CONFIG_SYSFS |
4825 | if (vi->mergeable_rx_bufs) | |
4826 | dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group; | |
4827 | #endif | |
0f13b66b ZYW |
4828 | netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); |
4829 | netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); | |
986a4f4d | 4830 | |
2e9ca760 MT |
4831 | virtnet_init_settings(dev); |
4832 | ||
ba5e4426 SS |
4833 | if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) { |
4834 | vi->failover = net_failover_create(vi->dev); | |
4b8e6ac4 WY |
4835 | if (IS_ERR(vi->failover)) { |
4836 | err = PTR_ERR(vi->failover); | |
ba5e4426 | 4837 | goto free_vqs; |
4b8e6ac4 | 4838 | } |
ba5e4426 SS |
4839 | } |
4840 | ||
91f41f01 | 4841 | if (vi->has_rss || vi->has_rss_hash_report) |
c7114b12 AM |
4842 | virtnet_init_default_rss(vi); |
4843 | ||
b9f74252 JW |
4844 | enable_rx_mode_work(vi); |
4845 | ||
50c0ada6 JW |
4846 | /* serialize netdev register + virtio_device_ready() with ndo_open() */ |
4847 | rtnl_lock(); | |
4848 | ||
4849 | err = register_netdevice(dev); | |
296f96fc RR |
4850 | if (err) { |
4851 | pr_debug("virtio_net: registering device failed\n"); | |
50c0ada6 | 4852 | rtnl_unlock(); |
ba5e4426 | 4853 | goto free_failover; |
296f96fc | 4854 | } |
b3369c1f | 4855 | |
4baf1e33 MT |
4856 | virtio_device_ready(vdev); |
4857 | ||
51b81317 JW |
4858 | _virtnet_set_queues(vi, vi->curr_queue_pairs); |
4859 | ||
9f62d221 LV |
4860 | /* A random MAC address has been assigned; notify the device. |
4861 | * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is absent | |
4862 | * because many devices work fine without the MAC being set explicitly. | |
4863 | */ | |
4864 | if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && | |
4865 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { | |
4866 | struct scatterlist sg; | |
4867 | ||
4868 | sg_init_one(&sg, dev->dev_addr, dev->addr_len); | |
4869 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, | |
4870 | VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { | |
4871 | pr_debug("virtio_net: setting MAC address failed\n"); | |
4872 | rtnl_unlock(); | |
4873 | err = -EINVAL; | |
4874 | goto free_unregister_netdev; | |
4875 | } | |
4876 | } | |
4877 | ||
50c0ada6 JW |
4878 | rtnl_unlock(); |
4879 | ||
8017c279 | 4880 | err = virtnet_cpu_notif_add(vi); |
8de4b2f3 WG |
4881 | if (err) { |
4882 | pr_debug("virtio_net: registering cpu notifier failed\n"); | |
f00e35e2 | 4883 | goto free_unregister_netdev; |
8de4b2f3 WG |
4884 | } |
4885 | ||
167c25e4 JW |
4886 | /* Assume link up if the device can't report link status; |
4887 | * otherwise get link status from config. */ | |
bda7fab5 | 4888 | netif_carrier_off(dev); |
167c25e4 | 4889 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { |
3b07e9ca | 4890 | schedule_work(&vi->config_work); |
167c25e4 JW |
4891 | } else { |
4892 | vi->status = VIRTIO_NET_S_LINK_UP; | |
faa9b39f | 4893 | virtnet_update_settings(vi); |
167c25e4 JW |
4894 | netif_carrier_on(dev); |
4895 | } | |
9f4d26d0 | 4896 | |
3f93522f JW |
4897 | for (i = 0; i < ARRAY_SIZE(guest_offloads); i++) |
4898 | if (virtio_has_feature(vi->vdev, guest_offloads[i])) | |
4899 | set_bit(guest_offloads[i], &vi->guest_offloads); | |
a02e8964 | 4900 | vi->guest_offloads_capable = vi->guest_offloads; |
3f93522f | 4901 | |
986a4f4d JW |
4902 | pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", |
4903 | dev->name, max_queue_pairs); | |
4904 | ||
296f96fc RR |
4905 | return 0; |
4906 | ||
f00e35e2 | 4907 | free_unregister_netdev: |
b3369c1f | 4908 | unregister_netdev(dev); |
ba5e4426 SS |
4909 | free_failover: |
4910 | net_failover_destroy(vi->failover); | |
d2a7ddda | 4911 | free_vqs: |
b0686565 | 4912 | virtio_reset_device(vdev); |
986a4f4d | 4913 | cancel_delayed_work_sync(&vi->refill); |
fb51879d | 4914 | free_receive_page_frags(vi); |
e9d7417b | 4915 | virtnet_del_vqs(vi); |
296f96fc RR |
4916 | free: |
4917 | free_netdev(dev); | |
4918 | return err; | |
4919 | } | |
4920 | ||
04486ed0 | 4921 | static void remove_vq_common(struct virtnet_info *vi) |
296f96fc | 4922 | { |
d9679d00 | 4923 | virtio_reset_device(vi->vdev); |
830a8a97 SM |
4924 | |
4925 | /* Free unused buffers in both send and recv, if any. */ | |
9ab86bbc | 4926 | free_unused_bufs(vi); |
fb6813f4 | 4927 | |
986a4f4d | 4928 | free_receive_bufs(vi); |
d2a7ddda | 4929 | |
fb51879d MD |
4930 | free_receive_page_frags(vi); |
4931 | ||
986a4f4d | 4932 | virtnet_del_vqs(vi); |
04486ed0 AS |
4933 | } |
4934 | ||
8cc085d6 | 4935 | static void virtnet_remove(struct virtio_device *vdev) |
04486ed0 AS |
4936 | { |
4937 | struct virtnet_info *vi = vdev->priv; | |
4938 | ||
8017c279 | 4939 | virtnet_cpu_notif_remove(vi); |
8de4b2f3 | 4940 | |
102a2786 MT |
4941 | /* Make sure no work handler is accessing the device. */ |
4942 | flush_work(&vi->config_work); | |
b9f74252 JW |
4943 | disable_rx_mode_work(vi); |
4944 | flush_work(&vi->rx_mode_work); | |
586d17c5 | 4945 | |
04486ed0 AS |
4946 | unregister_netdev(vi->dev); |
4947 | ||
ba5e4426 SS |
4948 | net_failover_destroy(vi->failover); |
4949 | ||
04486ed0 | 4950 | remove_vq_common(vi); |
fb6813f4 | 4951 | |
74b2553f | 4952 | free_netdev(vi->dev); |
296f96fc RR |
4953 | } |
4954 | ||
67a75194 | 4955 | static __maybe_unused int virtnet_freeze(struct virtio_device *vdev) |
0741bcb5 AS |
4956 | { |
4957 | struct virtnet_info *vi = vdev->priv; | |
4958 | ||
8017c279 | 4959 | virtnet_cpu_notif_remove(vi); |
9fe7bfce | 4960 | virtnet_freeze_down(vdev); |
0741bcb5 AS |
4961 | remove_vq_common(vi); |
4962 | ||
4963 | return 0; | |
4964 | } | |
4965 | ||
67a75194 | 4966 | static __maybe_unused int virtnet_restore(struct virtio_device *vdev) |
0741bcb5 AS |
4967 | { |
4968 | struct virtnet_info *vi = vdev->priv; | |
9fe7bfce | 4969 | int err; |
0741bcb5 | 4970 | |
9fe7bfce | 4971 | err = virtnet_restore_up(vdev); |
0741bcb5 AS |
4972 | if (err) |
4973 | return err; | |
986a4f4d JW |
4974 | virtnet_set_queues(vi, vi->curr_queue_pairs); |
4975 | ||
8017c279 | 4976 | err = virtnet_cpu_notif_add(vi); |
3f2869ca XY |
4977 | if (err) { |
4978 | virtnet_freeze_down(vdev); | |
4979 | remove_vq_common(vi); | |
ec9debbd | 4980 | return err; |
3f2869ca | 4981 | } |
ec9debbd | 4982 | |
0741bcb5 AS |
4983 | return 0; |
4984 | } | |
0741bcb5 | 4985 | |
296f96fc RR |
4986 | static struct virtio_device_id id_table[] = { |
4987 | { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, | |
4988 | { 0 }, | |
4989 | }; | |
4990 | ||
f3358507 MT |
4991 | #define VIRTNET_FEATURES \ |
4992 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \ | |
4993 | VIRTIO_NET_F_MAC, \ | |
4994 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \ | |
4995 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \ | |
4996 | VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \ | |
418044e1 | 4997 | VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \ |
f3358507 MT |
4998 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \ |
4999 | VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ | |
5000 | VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ | |
5001 | VIRTIO_NET_F_CTRL_MAC_ADDR, \ | |
faa9b39f | 5002 | VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \ |
c7114b12 | 5003 | VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \ |
be50da3e | 5004 | VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \ |
8af3bf66 | 5005 | VIRTIO_NET_F_VQ_NOTF_COAL, \ |
be50da3e | 5006 | VIRTIO_NET_F_GUEST_HDRLEN |
f3358507 | 5007 | |
c45a6816 | 5008 | static unsigned int features[] = { |
f3358507 MT |
5009 | VIRTNET_FEATURES, |
5010 | }; | |
5011 | ||
5012 | static unsigned int features_legacy[] = { | |
5013 | VIRTNET_FEATURES, | |
5014 | VIRTIO_NET_F_GSO, | |
e7428e95 | 5015 | VIRTIO_F_ANY_LAYOUT, |
c45a6816 RR |
5016 | }; |
5017 | ||
22402529 | 5018 | static struct virtio_driver virtio_net_driver = { |
c45a6816 RR |
5019 | .feature_table = features, |
5020 | .feature_table_size = ARRAY_SIZE(features), | |
f3358507 MT |
5021 | .feature_table_legacy = features_legacy, |
5022 | .feature_table_size_legacy = ARRAY_SIZE(features_legacy), | |
296f96fc RR |
5023 | .driver.name = KBUILD_MODNAME, |
5024 | .driver.owner = THIS_MODULE, | |
5025 | .id_table = id_table, | |
fe36cbe0 | 5026 | .validate = virtnet_validate, |
296f96fc | 5027 | .probe = virtnet_probe, |
8cc085d6 | 5028 | .remove = virtnet_remove, |
9f4d26d0 | 5029 | .config_changed = virtnet_config_changed, |
89107000 | 5030 | #ifdef CONFIG_PM_SLEEP |
0741bcb5 AS |
5031 | .freeze = virtnet_freeze, |
5032 | .restore = virtnet_restore, | |
5033 | #endif | |
296f96fc RR |
5034 | }; |
5035 | ||
8017c279 SAS |
5036 | static __init int virtio_net_driver_init(void) |
5037 | { | |
5038 | int ret; | |
5039 | ||
73c1b41e | 5040 | ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online", |
8017c279 SAS |
5041 | virtnet_cpu_online, |
5042 | virtnet_cpu_down_prep); | |
5043 | if (ret < 0) | |
5044 | goto out; | |
5045 | virtionet_online = ret; | |
73c1b41e | 5046 | ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead", |
8017c279 SAS |
5047 | NULL, virtnet_cpu_dead); |
5048 | if (ret) | |
5049 | goto err_dead; | |
4f50ef15 | 5050 | ret = register_virtio_driver(&virtio_net_driver); |
8017c279 SAS |
5051 | if (ret) |
5052 | goto err_virtio; | |
5053 | return 0; | |
5054 | err_virtio: | |
5055 | cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); | |
5056 | err_dead: | |
5057 | cpuhp_remove_multi_state(virtionet_online); | |
5058 | out: | |
5059 | return ret; | |
5060 | } | |
5061 | module_init(virtio_net_driver_init); | |
5062 | ||
5063 | static __exit void virtio_net_driver_exit(void) | |
5064 | { | |
cfa0ebc9 | 5065 | unregister_virtio_driver(&virtio_net_driver); |
8017c279 SAS |
5066 | cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); |
5067 | cpuhp_remove_multi_state(virtionet_online); | |
8017c279 SAS |
5068 | } |
5069 | module_exit(virtio_net_driver_exit); | |
296f96fc RR |
5070 | |
5071 | MODULE_DEVICE_TABLE(virtio, id_table); | |
5072 | MODULE_DESCRIPTION("Virtio network driver"); | |
5073 | MODULE_LICENSE("GPL"); |