/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <net/route.h>
#include <net/xdp.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

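/* A note on the permission bits above: 0444 makes csum, gso and napi_weight
 * read-only under /sys/module/virtio_net/parameters/, while napi_tx uses
 * 0644 and so stays writable through sysfs.
 */
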
/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)

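/* A rough reading of DECLARE_EWMA(pkt_len, 0, 64), assuming the kernel's
 * DECLARE_EWMA(name, precision, weight_rcp) semantics: each sample added via
 * ewma_pkt_len_add() moves the average by about
 *
 *	avg' = avg + (sample - avg) / 64
 *
 * so a single outlier packet shifts the refill size estimate by only ~1/64th
 * of its deviation.
 */
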
#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO
};

struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	{ "packets",	VIRTNET_SQ_STAT(packets) },
	{ "bytes",	VIRTNET_SQ_STAT(bytes) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	{ "packets",	VIRTNET_RQ_STAT(packets) },
	{ "bytes",	VIRTNET_RQ_STAT(bytes) },
};

#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send_queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];

	struct virtnet_sq_stats stats;

	struct napi_struct napi;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	struct virtnet_rq_stats stats;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[40];

	struct xdp_rxq_info xdp_rxq;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is the affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	/* Control VQ buffers: protected by the rtnl lock */
	struct virtio_net_ctrl_hdr ctrl_hdr;
	virtio_net_ctrl_ack ctrl_status;
	struct virtio_net_ctrl_mq ctrl_mq;
	u8 ctrl_promisc;
	u8 ctrl_allmulti;
	u16 ctrl_vid;
	u64 ctrl_offloads;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	unsigned long guest_offloads;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_mrg_rxbuf hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[4];
};

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

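/* Worked example of the interleaved layout above: queue pair 1 uses vq 2
 * for RX and vq 3 for TX, so rxq2vq(1) == 2 and txq2vq(1) == 3, while the
 * inverses give vq2rxq(vq2) == 2 / 2 == 1 and vq2txq(vq3) == (3 - 1) / 2 == 1.
 */
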
static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

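/* skb_vnet_hdr() works because skb->cb is a 48-byte per-skb scratch area,
 * which comfortably holds the 12-byte struct virtio_net_hdr_mrg_rxbuf while
 * the skb is inside the driver.
 */
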
/*
 * private is used to chain pages for big packets, put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		virtqueue_disable_cb(vq);
	}
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}

#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}

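/* Worked example of the context packing above: truesize = 1536 with
 * headroom = VIRTIO_XDP_HEADROOM (256) encodes to
 * (256 << 22) | 1536 == 0x40000600, and decodes back the same way.
 * Truesize must therefore stay below 1 << 22 (4 MiB), which any
 * page-frag-backed receive buffer easily satisfies.
 */
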
/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = sizeof(*hdr);
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	skb_put_data(skb, p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}

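/* The copy/frag split above means a packet shorter than the skb's tailroom
 * (napi_alloc_skb() was asked for GOOD_COPY_LEN, 128 bytes) is copied out
 * entirely so its page can be reused right away, while larger packets copy
 * only the head and attach the remainder as page fragments.
 */
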
static void virtnet_xdp_flush(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct send_queue *sq;
	unsigned int qp;

	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
	sq = &vi->sq[qp];

	virtqueue_kick(sq->vq);
}

static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
			       struct xdp_buff *xdp)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int len;
	struct send_queue *sq;
	unsigned int qp;
	void *xdp_sent;
	int err;

	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
	sq = &vi->sq[qp];

	/* Free up any pending old buffers before queueing new ones. */
	while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		struct page *sent_page = virt_to_head_page(xdp_sent);

		put_page(sent_page);
	}

	xdp->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdp->data;
	memset(hdr, 0, vi->hdr_len);

	sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);

	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC);
	if (unlikely(err))
		return false; /* Caller handle free/refcnt */

	return true;
}

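/* Both XDP transmit helpers above pick a per-CPU send queue:
 * qp = curr_queue_pairs - xdp_queue_pairs + smp_processor_id(). For example,
 * with 8 queue pairs of which 4 are reserved for XDP, CPU 2 always transmits
 * on sq[6] - the intent being that each CPU owns its own XDP TX queue.
 */
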
static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct receive_queue *rq = vi->rq;
	struct bpf_prog *xdp_prog;
	bool sent;

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicates XDP resources have been successfully allocated.
	 */
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (!xdp_prog)
		return -ENXIO;

	sent = __virtnet_xdp_xmit(vi, xdp);
	if (!sent)
		return -ENOSPC;
	return 0;
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
}

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that
 * we hit right after XDP is enabled and until queue is refilled
 * with large buffers with sufficient headroom - so it should affect
 * at most queue size packets. Afterwards, the conditions to enable
 * XDP should preclude the underlying device from sending packets
 * across multiple buffers (num_buf > 1), and we make sure buffers
 * have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       u16 *num_buf,
				       struct page *p,
				       int offset,
				       int page_off,
				       unsigned int *len)
{
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return NULL;

	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--*num_buf) {
		int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		unsigned int buflen;
		void *buf;
		int off;

		buf = virtqueue_get_buf(rq->vq, &buflen);
		if (unlikely(!buf))
			goto err_buf;

		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packets larger than the MTU.
		 */
		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
			put_page(p);
			goto err_buf;
		}

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	/* Headroom does not contribute to packet length */
	*len = page_off - VIRTIO_XDP_HEADROOM;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}

static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, void *ctx,
				     unsigned int len,
				     bool *xdp_xmit)
{
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
	unsigned int xdp_headroom = (unsigned long)ctx;
	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
	unsigned int headroom = vi->hdr_len + header_offset;
	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct page *page = virt_to_head_page(buf);
	unsigned int delta = 0;
	struct page *xdp_page;
	bool sent;
	int err;

	len -= vi->hdr_len;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
		struct xdp_buff xdp;
		void *orig_data;
		u32 act;

		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
			int offset = buf - page_address(page) + header_offset;
			unsigned int tlen = len + vi->hdr_len;
			u16 num_buf = 1;

			xdp_headroom = virtnet_get_headroom(vi);
			header_offset = VIRTNET_RX_PAD + xdp_headroom;
			headroom = vi->hdr_len + header_offset;
			buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			xdp_page = xdp_linearize_page(rq, &num_buf, page,
						      offset, header_offset,
						      &tlen);
			if (!xdp_page)
				goto err_xdp;

			buf = page_address(xdp_page);
			put_page(page);
			page = xdp_page;
		}

		xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
		xdp.data = xdp.data_hard_start + xdp_headroom;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + len;
		xdp.rxq = &rq->xdp_rxq;
		orig_data = xdp.data;
		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			/* Recalculate length in case bpf program changed it */
			delta = orig_data - xdp.data;
			break;
		case XDP_TX:
			sent = __virtnet_xdp_xmit(vi, &xdp);
			if (unlikely(!sent)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				goto err_xdp;
			}
			*xdp_xmit = true;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err)
				goto err_xdp;
			*xdp_xmit = true;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
		case XDP_DROP:
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	skb = build_skb(buf, buflen);
	if (!skb) {
		put_page(page);
		goto err;
	}
	skb_reserve(skb, headroom - delta);
	skb_put(skb, len + delta);
	if (!delta) {
		buf += header_offset;
		memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
	} /* keep zeroed vnet hdr since packet was changed by bpf */

err:
	return skb;

err_xdp:
	rcu_read_unlock();
	dev->stats.rx_dropped++;
	put_page(page);
xdp_xmit:
	return NULL;
}

static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len)
{
	struct page *page = buf;
	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);

	if (unlikely(!skb))
		goto err;

	return skb;

err:
	dev->stats.rx_dropped++;
	give_pages(rq, page);
	return NULL;
}

static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 void *buf,
					 void *ctx,
					 unsigned int len,
					 bool *xdp_xmit)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	struct bpf_prog *xdp_prog;
	unsigned int truesize;
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	bool sent;
	int err;

	head_skb = NULL;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct page *xdp_page;
		struct xdp_buff xdp;
		void *data;
		u32 act;

		/* This happens when rx buffer size is underestimated
		 * or headroom is not enough because the buffer was
		 * refilled before XDP is set. This should only happen
		 * for the first several packets, so we don't care much
		 * about its performance.
		 */
		if (unlikely(num_buf > 1 ||
			     headroom < virtnet_get_headroom(vi))) {
			/* linearize data for XDP */
			xdp_page = xdp_linearize_page(rq, &num_buf,
						      page, offset,
						      VIRTIO_XDP_HEADROOM,
						      &len);
			if (!xdp_page)
				goto err_xdp;
			offset = VIRTIO_XDP_HEADROOM;
		} else {
			xdp_page = page;
		}

		/* Transient failure which in theory could occur if
		 * in-flight packets from before XDP was enabled reach
		 * the receive path after XDP is loaded. In practice I
		 * was not able to create this condition.
		 */
		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		/* Allow consuming headroom but reserve enough space to push
		 * the descriptor on if we get an XDP_TX return code.
		 */
		data = page_address(xdp_page) + offset;
		xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
		xdp.data = data + vi->hdr_len;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + (len - vi->hdr_len);
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			/* recalculate offset to account for any header
			 * adjustments. Note other cases do not build an
			 * skb and avoid using offset
			 */
			offset = xdp.data -
					page_address(xdp_page) - vi->hdr_len;

			/* We can only create skb based on xdp_page. */
			if (unlikely(xdp_page != page)) {
				rcu_read_unlock();
				put_page(page);
				head_skb = page_to_skb(vi, rq, xdp_page,
						       offset, len, PAGE_SIZE);
				return head_skb;
			}
			break;
		case XDP_TX:
			sent = __virtnet_xdp_xmit(vi, &xdp);
			if (unlikely(!sent)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				if (unlikely(xdp_page != page))
					put_page(xdp_page);
				goto err_xdp;
			}
			*xdp_xmit = true;
			if (unlikely(xdp_page != page))
				goto err_xdp;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err) {
				if (unlikely(xdp_page != page))
					put_page(xdp_page);
				goto err_xdp;
			}
			*xdp_xmit = true;
			if (unlikely(xdp_page != page))
				goto err_xdp;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
		case XDP_DROP:
			if (unlikely(xdp_page != page))
				__free_pages(xdp_page, 0);
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	truesize = mergeable_ctx_to_truesize(ctx);
	if (unlikely(len > truesize)) {
		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
			 dev->name, len, (unsigned long)ctx);
		dev->stats.rx_length_errors++;
		goto err_skb;
	}

	head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
	curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;
	while (--num_buf) {
		int num_skb_frags;

		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf,
				 virtio16_to_cpu(vi->vdev,
						 hdr->num_buffers));
			dev->stats.rx_length_errors++;
			goto err_buf;
		}

		page = virt_to_head_page(buf);

		truesize = mergeable_ctx_to_truesize(ctx);
		if (unlikely(len > truesize)) {
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)ctx);
			dev->stats.rx_length_errors++;
			goto err_skb;
		}

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += truesize;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, truesize);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;

err_xdp:
	rcu_read_unlock();
err_skb:
	put_page(page);
	while (--num_buf) {
		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			dev->stats.rx_length_errors++;
			break;
		}
		page = virt_to_head_page(buf);
		put_page(page);
	}
err_buf:
	dev->stats.rx_dropped++;
	dev_kfree_skb(head_skb);
xdp_xmit:
	return NULL;
}

static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
		       void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
{
	struct net_device *dev = vi->dev;
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	int ret;

	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs) {
			put_page(virt_to_head_page(buf));
		} else if (vi->big_packets) {
			give_pages(rq, buf);
		} else {
			put_page(virt_to_head_page(buf));
		}
		return 0;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit);
	else if (vi->big_packets)
		skb = receive_big(dev, vi, rq, buf, len);
	else
		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit);

	if (unlikely(!skb))
		return 0;

	hdr = skb_vnet_hdr(skb);

	ret = skb->len;

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
				  virtio_is_little_endian(vi->vdev))) {
		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
				     dev->name, hdr->hdr.gso_type,
				     hdr->hdr.gso_size);
		goto frame_err;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
	return ret;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
	return 0;
}

/* Unlike mergeable buffers, all buffers are allocated to the
 * same size, except for the headroom. For this reason we do
 * not need to use mergeable_len_to_ctx here - it is enough
 * to store the headroom as the context ignoring the truesize.
 */
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
			     gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	char *buf;
	unsigned int xdp_headroom = virtnet_get_headroom(vi);
	void *ctx = (void *)(unsigned long)xdp_headroom;
	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
	int err;

	len = SKB_DATA_ALIGN(len) +
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	get_page(alloc_frag->page);
	alloc_frag->offset += len;
	sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
		    vi->hdr_len + GOOD_PACKET_LEN);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));
	return err;
}

static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
			   gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separate rq->sg[0] for the header - required in case !any_header_sg */
	sg_set_buf(&rq->sg[0], p, vi->hdr_len);

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

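/* Receive layout built above, for the !mergeable big-packets case:
 * sg[0] holds the virtio header, sg[1] the rest of the first page after the
 * padded header, and sg[2..MAX_SKB_FRAGS+1] one full page each - a maximum
 * receivable frame of roughly MAX_SKB_FRAGS + 1 pages, matching the length
 * check in page_to_skb().
 */
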
static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
					  struct ewma_pkt_len *avg_pkt_len,
					  unsigned int room)
{
	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	unsigned int len;

	if (room)
		return PAGE_SIZE - room;

	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
				rq->min_buf_len, PAGE_SIZE - hdr_len);

	return ALIGN(len, L1_CACHE_BYTES);
}

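/* Example: with no XDP headroom (room == 0), an EWMA packet size of 1500
 * (assuming min_buf_len <= 1500) and hdr_len == 12, len = 12 + 1500 = 1512,
 * which ALIGN(len, L1_CACHE_BYTES) rounds up to 1536 on a machine with
 * 64-byte cachelines.
 */
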
static int add_recvbuf_mergeable(struct virtnet_info *vi,
				 struct receive_queue *rq, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	unsigned int headroom = virtnet_get_headroom(vi);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
	char *buf;
	void *ctx;
	int err;
	unsigned int len, hole;

	/* Extra tailroom is needed to satisfy XDP's assumption. This
	 * means rx frags coalescing won't work, but consider we've
	 * disabled GSO for XDP, it won't be a big issue.
	 */
	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	buf += headroom; /* advance address leaving hole at front of pkt */
	get_page(alloc_frag->page);
	alloc_frag->offset += len + room;
	hole = alloc_frag->size - alloc_frag->offset;
	if (hole < len + room) {
		/* To avoid internal fragmentation, if there is very likely not
		 * enough space for another buffer, add the remaining space to
		 * the current buffer.
		 */
		len += hole;
		alloc_frag->offset += hole;
	}

	sg_init_one(rq->sg, buf, len);
	ctx = mergeable_len_to_ctx(len, headroom);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
			  gfp_t gfp)
{
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, rq, gfp);
		else
			err = add_recvbuf_small(vi, rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
	} while (rq->vq->num_free);
	virtqueue_kick(rq->vq);
	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	virtqueue_napi_schedule(&rq->napi, rvq);
}

static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{
	napi_enable(napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets now.
	 * Call local_bh_enable after to trigger softIRQ processing.
	 */
	local_bh_disable();
	virtqueue_napi_schedule(napi, vq);
	local_bh_enable();
}

static void virtnet_napi_tx_enable(struct virtnet_info *vi,
				   struct virtqueue *vq,
				   struct napi_struct *napi)
{
	if (!napi->weight)
		return;

	/* Tx napi touches cachelines on the cpu handling tx interrupts. Only
	 * enable the feature if this is likely affine with the transmit path.
	 */
	if (!vi->affinity_hint_set) {
		napi->weight = 0;
		return;
	}

	return virtnet_napi_enable(vq, napi);
}

static void virtnet_napi_tx_disable(struct napi_struct *napi)
{
	if (napi->weight)
		napi_disable(napi);
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
		virtnet_napi_enable(rq->vq, &rq->napi);

		/* In theory, this can happen: if we don't get any buffers in
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int len, received = 0, bytes = 0;
	void *buf;

	if (!vi->big_packets || vi->mergeable_rx_bufs) {
		void *ctx;

		while (received < budget &&
		       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
			bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit);
			received++;
		}
	} else {
		while (received < budget &&
		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
			bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit);
			received++;
		}
	}

	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.bytes += bytes;
	rq->stats.packets += received;
	u64_stats_update_end(&rq->stats.syncp);

	return received;
}

static void free_old_xmit_skbs(struct send_queue *sq)
{
	struct sk_buff *skb;
	unsigned int len;
	unsigned int packets = 0;
	unsigned int bytes = 0;

	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		bytes += skb->len;
		packets++;

		dev_consume_skb_any(skb);
	}

	/* Avoid overhead when no packets have been processed; this
	 * happens when called speculatively from start_xmit.
	 */
	if (!packets)
		return;

	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += bytes;
	sq->stats.packets += packets;
	u64_stats_update_end(&sq->stats.syncp);
}

static void virtnet_poll_cleantx(struct receive_queue *rq)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int index = vq2rxq(rq->vq);
	struct send_queue *sq = &vi->sq[index];
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);

	if (!sq->napi.weight)
		return;

	if (__netif_tx_trylock(txq)) {
		free_old_xmit_skbs(sq);
		__netif_tx_unlock(txq);
	}

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	unsigned int received;
	bool xdp_xmit = false;

	virtnet_poll_cleantx(rq);

	received = virtnet_receive(rq, budget, &xdp_xmit);

	/* Out of packets? */
	if (received < budget)
		virtqueue_napi_complete(napi, rq->vq, received);

	if (xdp_xmit)
		xdp_do_flush_map();

	return received;
}

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i, err;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (i < vi->curr_queue_pairs)
			/* Make sure we have some buffers: if oom use wq. */
			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);

		err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i);
		if (err < 0)
			return err;

		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
		virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
	}

	return 0;
}

static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
	struct send_queue *sq = container_of(napi, struct send_queue, napi);
	struct virtnet_info *vi = sq->vq->vdev->priv;
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));

	__netif_tx_lock(txq, raw_smp_processor_id());
	free_old_xmit_skbs(sq);
	__netif_tx_unlock(txq);

	virtqueue_napi_complete(napi, sq->vq, 0);

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);

	return 0;
}

static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	int num_sg;
	unsigned hdr_len = vi->hdr_len;
	bool can_push;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	can_push = vi->any_header_sg &&
		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
	/* Even if we can, don't push here yet as this would skew
	 * csum_start offset below. */
	if (can_push)
		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
	else
		hdr = skb_vnet_hdr(skb);

	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
				    virtio_is_little_endian(vi->vdev), false))
		BUG();

	if (vi->mergeable_rx_bufs)
		hdr->num_buffers = 0;

	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
	if (can_push) {
		__skb_push(skb, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		/* Pull header back to avoid skew in tx bytes calculations. */
		__skb_pull(skb, hdr_len);
	} else {
		sg_set_buf(sq->sg, hdr, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		num_sg++;
	}
	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}

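/* Of the two layouts prepared above: when any_header_sg is offered and the
 * skb has aligned, uncloned headroom, the virtio header is pushed into the
 * skb itself so header plus data go out as one scatterlist run; otherwise
 * the header stashed in skb->cb occupies its own sg entry, hence the extra
 * num_sg++.
 */
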
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
	bool kick = !skb->xmit_more;
	bool use_napi = sq->napi.weight;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(sq);

	if (use_napi && kick)
		virtqueue_enable_cb_delayed(sq->vq);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err)) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Don't wait up for transmitted skbs to be freed. */
	if (!use_napi) {
		skb_orphan(skb);
		nf_reset(skb);
	}

	/* If running out of space, stop queue to avoid getting packets that we
	 * are then unable to transmit.
	 * An alternative would be to force queuing layer to requeue the skb by
	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
	 * returned in a normal path of operation: it means that driver is not
	 * maintaining the TX queue stop/start state properly, and causes
	 * the stack to do a non-trivial amount of useless work.
	 * Since most packets only take 1 or 2 ring slots, stopping the queue
	 * early means 16 slots are typically wasted.
	 */
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (!use_napi &&
		    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

	if (kick || netif_xmit_stopped(txq))
		virtqueue_kick(sq->vq);

	return NETDEV_TX_OK;
}

40cbfc37 AK |
1443 | /* |
1444 | * Send command via the control virtqueue and check status. Commands | |
1445 | * supported by the hypervisor, as indicated by feature bits, should | |
788a8b6d | 1446 | * never fail unless improperly formatted. |
40cbfc37 AK |
1447 | */ |
1448 | static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, | |
d24bae32 | 1449 | struct scatterlist *out) |
40cbfc37 | 1450 | { |
f7bc9594 | 1451 | struct scatterlist *sgs[4], hdr, stat; |
d24bae32 | 1452 | unsigned out_num = 0, tmp; |
40cbfc37 AK |
1453 | |
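	/* sgs[] layout: the ctrl header (out), optional command data (out),
	 * then the status byte (in, written by the device).
	 */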
1454 | /* Caller should know better */ | |
f7bc9594 | 1455 | BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); |
40cbfc37 | 1456 | |
2ac46030 MT |
1457 | vi->ctrl_status = ~0; |
1458 | vi->ctrl_hdr.class = class; | |
1459 | vi->ctrl_hdr.cmd = cmd; | |
f7bc9594 | 1460 | /* Add header */ |
2ac46030 | 1461 | sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr)); |
f7bc9594 | 1462 | sgs[out_num++] = &hdr; |
40cbfc37 | 1463 | |
f7bc9594 RR |
1464 | if (out) |
1465 | sgs[out_num++] = out; | |
40cbfc37 | 1466 | |
f7bc9594 | 1467 | /* Add return status. */ |
2ac46030 | 1468 | sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status)); |
d24bae32 | 1469 | sgs[out_num] = &stat; |
40cbfc37 | 1470 | |
d24bae32 | 1471 | BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); |
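	/* Commands are issued one at a time and waited for, so the control
	 * vq always has room and the add is not expected to fail.
	 */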
a7c58146 | 1472 | virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); |
40cbfc37 | 1473 | |
67975901 | 1474 | if (unlikely(!virtqueue_kick(vi->cvq))) |
2ac46030 | 1475 | return vi->ctrl_status == VIRTIO_NET_OK; |
40cbfc37 AK |
1476 | |
1477 | /* Spin for a response; the kick causes an ioport write, trapping
1478 | * into the hypervisor, so the request should be handled immediately. | |
1479 | */ | |
047b9b94 HG |
1480 | while (!virtqueue_get_buf(vi->cvq, &tmp) && |
1481 | !virtqueue_is_broken(vi->cvq)) | |
40cbfc37 AK |
1482 | cpu_relax(); |
1483 | ||
2ac46030 | 1484 | return vi->ctrl_status == VIRTIO_NET_OK; |
40cbfc37 AK |
1485 | } |
1486 | ||
9c46f6d4 AW |
1487 | static int virtnet_set_mac_address(struct net_device *dev, void *p) |
1488 | { | |
1489 | struct virtnet_info *vi = netdev_priv(dev); | |
1490 | struct virtio_device *vdev = vi->vdev; | |
f2f2c8b4 | 1491 | int ret; |
e37e2ff3 | 1492 | struct sockaddr *addr; |
7e58d5ae | 1493 | struct scatterlist sg; |
9c46f6d4 | 1494 | |
801822d1 | 1495 | addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); |
e37e2ff3 AL |
1496 | if (!addr) |
1497 | return -ENOMEM; | |
e37e2ff3 AL |
1498 | |
1499 | ret = eth_prepare_mac_addr_change(dev, addr); | |
f2f2c8b4 | 1500 | if (ret) |
e37e2ff3 | 1501 | goto out; |
9c46f6d4 | 1502 | |
7e58d5ae AK |
1503 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { |
1504 | sg_init_one(&sg, addr->sa_data, dev->addr_len); | |
1505 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, | |
d24bae32 | 1506 | VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { |
7e58d5ae AK |
1507 | dev_warn(&vdev->dev, |
1508 | "Failed to set mac address by vq command.\n"); | |
e37e2ff3 AL |
1509 | ret = -EINVAL; |
1510 | goto out; | |
7e58d5ae | 1511 | } |
7e93a02f MT |
1512 | } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && |
1513 | !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { | |
855e0c52 RR |
1514 | unsigned int i; |
1515 | ||
1516 | /* Naturally, this has an atomicity problem. */ | |
1517 | for (i = 0; i < dev->addr_len; i++) | |
1518 | virtio_cwrite8(vdev, | |
1519 | offsetof(struct virtio_net_config, mac) + | |
1520 | i, addr->sa_data[i]); | |
7e58d5ae AK |
1521 | } |
1522 | ||
1523 | eth_commit_mac_addr_change(dev, p); | |
e37e2ff3 | 1524 | ret = 0; |
9c46f6d4 | 1525 | |
e37e2ff3 AL |
1526 | out: |
1527 | kfree(addr); | |
1528 | return ret; | |
9c46f6d4 AW |
1529 | } |
1530 | ||
bc1f4470 | 1531 | static void virtnet_stats(struct net_device *dev, |
1532 | struct rtnl_link_stats64 *tot) | |
3fa2a1df | 1533 | { |
1534 | struct virtnet_info *vi = netdev_priv(dev); | |
3fa2a1df | 1535 | unsigned int start; |
d7dfc5cf | 1536 | int i; |
3fa2a1df | 1537 | |
d7dfc5cf | 1538 | for (i = 0; i < vi->max_queue_pairs; i++) { |
3fa2a1df | 1539 | u64 tpackets, tbytes, rpackets, rbytes; |
d7dfc5cf TM |
1540 | struct receive_queue *rq = &vi->rq[i]; |
1541 | struct send_queue *sq = &vi->sq[i]; | |
3fa2a1df | 1542 | |
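		/* u64_stats retry loops: re-read if a writer raced with us
		 * (the sequence counter only matters on 32-bit SMP).
		 */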
1543 | do { | |
d7dfc5cf TM |
1544 | start = u64_stats_fetch_begin_irq(&sq->stats.syncp); |
1545 | tpackets = sq->stats.packets; | |
1546 | tbytes = sq->stats.bytes; | |
1547 | } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); | |
83a27052 ED |
1548 | |
1549 | do { | |
d7dfc5cf TM |
1550 | start = u64_stats_fetch_begin_irq(&rq->stats.syncp); |
1551 | rpackets = rq->stats.packets; | |
1552 | rbytes = rq->stats.bytes; | |
1553 | } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); | |
3fa2a1df | 1554 | |
1555 | tot->rx_packets += rpackets; | |
1556 | tot->tx_packets += tpackets; | |
1557 | tot->rx_bytes += rbytes; | |
1558 | tot->tx_bytes += tbytes; | |
1559 | } | |
1560 | ||
1561 | tot->tx_dropped = dev->stats.tx_dropped; | |
021ac8d3 | 1562 | tot->tx_fifo_errors = dev->stats.tx_fifo_errors; |
3fa2a1df | 1563 | tot->rx_dropped = dev->stats.rx_dropped; |
1564 | tot->rx_length_errors = dev->stats.rx_length_errors; | |
1565 | tot->rx_frame_errors = dev->stats.rx_frame_errors; | |
3fa2a1df | 1566 | } |
1567 | ||
da74e89d AS |
1568 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1569 | static void virtnet_netpoll(struct net_device *dev) | |
1570 | { | |
1571 | struct virtnet_info *vi = netdev_priv(dev); | |
986a4f4d | 1572 | int i; |
da74e89d | 1573 | |
986a4f4d JW |
1574 | for (i = 0; i < vi->curr_queue_pairs; i++) |
1575 | napi_schedule(&vi->rq[i].napi); | |
da74e89d AS |
1576 | } |
1577 | #endif | |
1578 | ||
586d17c5 JW |
1579 | static void virtnet_ack_link_announce(struct virtnet_info *vi) |
1580 | { | |
1581 | rtnl_lock(); | |
1582 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, | |
d24bae32 | 1583 | VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) |
586d17c5 JW |
1584 | dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); |
1585 | rtnl_unlock(); | |
1586 | } | |
1587 | ||
47315329 | 1588 | static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
986a4f4d JW |
1589 | { |
1590 | struct scatterlist sg; | |
986a4f4d JW |
1591 | struct net_device *dev = vi->dev; |
1592 | ||
1593 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) | |
1594 | return 0; | |
1595 | ||
a725ee3e AL |
1596 | vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); |
1597 | sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq)); | |
986a4f4d JW |
1598 | |
1599 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, | |
d24bae32 | 1600 | VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { |
986a4f4d JW |
1601 | dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", |
1602 | queue_pairs); | |
1603 | return -EINVAL; | |
55257d72 | 1604 | } else { |
986a4f4d | 1605 | vi->curr_queue_pairs = queue_pairs; |
35ed159b JW |
1606 | /* virtnet_open() will refill when the device is brought up. */
1607 | if (dev->flags & IFF_UP) | |
1608 | schedule_delayed_work(&vi->refill, 0); | |
55257d72 | 1609 | } |
986a4f4d JW |
1610 | |
1611 | return 0; | |
1612 | } | |
1613 | ||
47315329 JF |
1614 | static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
1615 | { | |
1616 | int err; | |
1617 | ||
1618 | rtnl_lock(); | |
1619 | err = _virtnet_set_queues(vi, queue_pairs); | |
1620 | rtnl_unlock(); | |
1621 | return err; | |
1622 | } | |
1623 | ||
296f96fc RR |
1624 | static int virtnet_close(struct net_device *dev) |
1625 | { | |
1626 | struct virtnet_info *vi = netdev_priv(dev); | |
986a4f4d | 1627 | int i; |
296f96fc | 1628 | |
b2baed69 RR |
1629 | /* Make sure refill_work doesn't re-enable napi! */ |
1630 | cancel_delayed_work_sync(&vi->refill); | |
986a4f4d | 1631 | |
b92f1e67 | 1632 | for (i = 0; i < vi->max_queue_pairs; i++) { |
754b8a21 | 1633 | xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); |
986a4f4d | 1634 | napi_disable(&vi->rq[i].napi); |
78a57b48 | 1635 | virtnet_napi_tx_disable(&vi->sq[i].napi); |
b92f1e67 | 1636 | } |
296f96fc | 1637 | |
296f96fc RR |
1638 | return 0; |
1639 | } | |
1640 | ||
2af7698e AW |
1641 | static void virtnet_set_rx_mode(struct net_device *dev) |
1642 | { | |
1643 | struct virtnet_info *vi = netdev_priv(dev); | |
f565a7c2 | 1644 | struct scatterlist sg[2]; |
f565a7c2 | 1645 | struct virtio_net_ctrl_mac *mac_data; |
ccffad25 | 1646 | struct netdev_hw_addr *ha; |
32e7bfc4 | 1647 | int uc_count; |
4cd24eaf | 1648 | int mc_count; |
f565a7c2 AW |
1649 | void *buf; |
1650 | int i; | |
2af7698e | 1651 | |
788a8b6d | 1652 | /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ |
2af7698e AW |
1653 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) |
1654 | return; | |
1655 | ||
2ac46030 MT |
1656 | vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0); |
1657 | vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0); | |
2af7698e | 1658 | |
2ac46030 | 1659 | sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc)); |
2af7698e AW |
1660 | |
1661 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, | |
d24bae32 | 1662 | VIRTIO_NET_CTRL_RX_PROMISC, sg)) |
2af7698e | 1663 | dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", |
2ac46030 | 1664 | vi->ctrl_promisc ? "en" : "dis"); |
2af7698e | 1665 | |
2ac46030 | 1666 | sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti)); |
2af7698e AW |
1667 | |
1668 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, | |
d24bae32 | 1669 | VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) |
2af7698e | 1670 | dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", |
2ac46030 | 1671 | vi->ctrl_allmulti ? "en" : "dis"); |
f565a7c2 | 1672 | |
32e7bfc4 | 1673 | uc_count = netdev_uc_count(dev); |
4cd24eaf | 1674 | mc_count = netdev_mc_count(dev); |
f565a7c2 | 1675 | /* MAC filter - use one buffer for both lists */ |
4cd24eaf JP |
1676 | buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + |
1677 | (2 * sizeof(mac_data->entries)), GFP_ATOMIC); | |
1678 | mac_data = buf; | |
e68ed8f0 | 1679 | if (!buf) |
f565a7c2 | 1680 | return; |
f565a7c2 | 1681 | |
23e258e1 AW |
1682 | sg_init_table(sg, 2); |
1683 | ||
f565a7c2 | 1684 | /* Store the unicast list and count in the front of the buffer */ |
fdd819b2 | 1685 | mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); |
ccffad25 | 1686 | i = 0; |
32e7bfc4 | 1687 | netdev_for_each_uc_addr(ha, dev) |
ccffad25 | 1688 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
f565a7c2 AW |
1689 | |
1690 | sg_set_buf(&sg[0], mac_data, | |
32e7bfc4 | 1691 | sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); |
f565a7c2 AW |
1692 | |
1693 | /* multicast list and count fill the end */ | |
32e7bfc4 | 1694 | mac_data = (void *)&mac_data->macs[uc_count][0]; |
f565a7c2 | 1695 | |
fdd819b2 | 1696 | mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); |
567ec874 | 1697 | i = 0; |
22bedad3 JP |
1698 | netdev_for_each_mc_addr(ha, dev) |
1699 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); | |
f565a7c2 AW |
1700 | |
1701 | sg_set_buf(&sg[1], mac_data, | |
4cd24eaf | 1702 | sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); |
f565a7c2 AW |
1703 | |
1704 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, | |
d24bae32 | 1705 | VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) |
99e872ae | 1706 | dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); |
f565a7c2 AW |
1707 | |
1708 | kfree(buf); | |
2af7698e AW |
1709 | } |
1710 | ||
80d5c368 PM |
1711 | static int virtnet_vlan_rx_add_vid(struct net_device *dev, |
1712 | __be16 proto, u16 vid) | |
0bde9569 AW |
1713 | { |
1714 | struct virtnet_info *vi = netdev_priv(dev); | |
1715 | struct scatterlist sg; | |
1716 | ||
a725ee3e AL |
1717 | vi->ctrl_vid = vid; |
1718 | sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); | |
0bde9569 AW |
1719 | |
1720 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, | |
d24bae32 | 1721 | VIRTIO_NET_CTRL_VLAN_ADD, &sg)) |
0bde9569 | 1722 | dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); |
8e586137 | 1723 | return 0; |
0bde9569 AW |
1724 | } |
1725 | ||
80d5c368 PM |
1726 | static int virtnet_vlan_rx_kill_vid(struct net_device *dev, |
1727 | __be16 proto, u16 vid) | |
0bde9569 AW |
1728 | { |
1729 | struct virtnet_info *vi = netdev_priv(dev); | |
1730 | struct scatterlist sg; | |
1731 | ||
a725ee3e AL |
1732 | vi->ctrl_vid = vid; |
1733 | sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); | |
0bde9569 AW |
1734 | |
1735 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, | |
d24bae32 | 1736 | VIRTIO_NET_CTRL_VLAN_DEL, &sg)) |
0bde9569 | 1737 | dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); |
8e586137 | 1738 | return 0; |
0bde9569 AW |
1739 | } |
1740 | ||
8898c21c | 1741 | static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) |
986a4f4d JW |
1742 | { |
1743 | int i; | |
1744 | ||
8898c21c WG |
1745 | if (vi->affinity_hint_set) { |
1746 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
47be2479 WG |
1747 | virtqueue_set_affinity(vi->rq[i].vq, -1); |
1748 | virtqueue_set_affinity(vi->sq[i].vq, -1); | |
1749 | } | |
1750 | ||
8898c21c WG |
1751 | vi->affinity_hint_set = false; |
1752 | } | |
8898c21c | 1753 | } |
47be2479 | 1754 | |
8898c21c WG |
1755 | static void virtnet_set_affinity(struct virtnet_info *vi) |
1756 | { | |
1757 | int i; | |
1758 | int cpu; | |
986a4f4d JW |
1759 | |
1760 | /* In multiqueue mode, when the number of cpus equals the number of
1761 | * queue pairs, we let each queue pair be private to one cpu by
1762 | * setting the affinity hint to eliminate contention.
1763 | */ | |
8898c21c WG |
1764 | if (vi->curr_queue_pairs == 1 || |
1765 | vi->max_queue_pairs != num_online_cpus()) { | |
1766 | virtnet_clean_affinity(vi, -1); | |
1767 | return; | |
986a4f4d JW |
1768 | } |
1769 | ||
8898c21c WG |
1770 | i = 0; |
1771 | for_each_online_cpu(cpu) { | |
986a4f4d JW |
1772 | virtqueue_set_affinity(vi->rq[i].vq, cpu); |
1773 | virtqueue_set_affinity(vi->sq[i].vq, cpu); | |
9bb8ca86 | 1774 | netif_set_xps_queue(vi->dev, cpumask_of(cpu), i); |
8898c21c | 1775 | i++; |
986a4f4d JW |
1776 | } |
1777 | ||
8898c21c | 1778 | vi->affinity_hint_set = true; |
986a4f4d JW |
1779 | } |
1780 | ||
8017c279 | 1781 | static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) |
8de4b2f3 | 1782 | { |
8017c279 SAS |
1783 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, |
1784 | node); | |
1785 | virtnet_set_affinity(vi); | |
1786 | return 0; | |
1787 | } | |
8de4b2f3 | 1788 | |
8017c279 SAS |
1789 | static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node) |
1790 | { | |
1791 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, | |
1792 | node_dead); | |
1793 | virtnet_set_affinity(vi); | |
1794 | return 0; | |
1795 | } | |
3ab098df | 1796 | |
8017c279 SAS |
1797 | static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) |
1798 | { | |
1799 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, | |
1800 | node); | |
1801 | ||
1802 | virtnet_clean_affinity(vi, cpu); | |
1803 | return 0; | |
1804 | } | |
1805 | ||
1806 | static enum cpuhp_state virtionet_online; | |
1807 | ||
1808 | static int virtnet_cpu_notif_add(struct virtnet_info *vi) | |
1809 | { | |
1810 | int ret; | |
1811 | ||
1812 | ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); | |
1813 | if (ret) | |
1814 | return ret; | |
1815 | ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, | |
1816 | &vi->node_dead); | |
1817 | if (!ret) | |
1818 | return ret; | |
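	/* The CPUHP_VIRT_NET_DEAD hook failed; unwind the online one. */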
1819 | cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); | |
1820 | return ret; | |
1821 | } | |
1822 | ||
1823 | static void virtnet_cpu_notif_remove(struct virtnet_info *vi) | |
1824 | { | |
1825 | cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); | |
1826 | cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, | |
1827 | &vi->node_dead); | |
986a4f4d JW |
1828 | } |
1829 | ||
8f9f4668 RJ |
1830 | static void virtnet_get_ringparam(struct net_device *dev, |
1831 | struct ethtool_ringparam *ring) | |
1832 | { | |
1833 | struct virtnet_info *vi = netdev_priv(dev); | |
1834 | ||
986a4f4d JW |
1835 | ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); |
1836 | ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); | |
8f9f4668 RJ |
1837 | ring->rx_pending = ring->rx_max_pending; |
1838 | ring->tx_pending = ring->tx_max_pending; | |
8f9f4668 RJ |
1839 | } |
1840 | ||
66846048 RJ |
1841 | |
1842 | static void virtnet_get_drvinfo(struct net_device *dev, | |
1843 | struct ethtool_drvinfo *info) | |
1844 | { | |
1845 | struct virtnet_info *vi = netdev_priv(dev); | |
1846 | struct virtio_device *vdev = vi->vdev; | |
1847 | ||
1848 | strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); | |
1849 | strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); | |
1850 | strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); | |
1852 | } | |
1853 | ||
d73bcd2c JW |
1854 | /* TODO: Eliminate out-of-order (OOO) packets during switching */
1855 | static int virtnet_set_channels(struct net_device *dev, | |
1856 | struct ethtool_channels *channels) | |
1857 | { | |
1858 | struct virtnet_info *vi = netdev_priv(dev); | |
1859 | u16 queue_pairs = channels->combined_count; | |
1860 | int err; | |
1861 | ||
1862 | /* We don't support separate rx/tx channels. | |
1863 | * We don't allow setting 'other' channels. | |
1864 | */ | |
1865 | if (channels->rx_count || channels->tx_count || channels->other_count) | |
1866 | return -EINVAL; | |
1867 | ||
c18e9cd6 | 1868 | if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) |
d73bcd2c JW |
1869 | return -EINVAL; |
1870 | ||
f600b690 JF |
1871 | /* For now we don't support modifying channels while XDP is loaded.
1872 | * Also, when XDP is loaded all RX queues have XDP programs, so we
1873 | * only need to check a single RX queue.
1874 | */ | |
1875 | if (vi->rq[0].xdp_prog) | |
1876 | return -EINVAL; | |
1877 | ||
47be2479 | 1878 | get_online_cpus(); |
47315329 | 1879 | err = _virtnet_set_queues(vi, queue_pairs); |
d73bcd2c JW |
1880 | if (!err) { |
1881 | netif_set_real_num_tx_queues(dev, queue_pairs); | |
1882 | netif_set_real_num_rx_queues(dev, queue_pairs); | |
1883 | ||
8898c21c | 1884 | virtnet_set_affinity(vi); |
d73bcd2c | 1885 | } |
47be2479 | 1886 | put_online_cpus(); |
d73bcd2c JW |
1887 | |
1888 | return err; | |
1889 | } | |
1890 | ||
d7dfc5cf TM |
1891 | static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data) |
1892 | { | |
1893 | struct virtnet_info *vi = netdev_priv(dev); | |
1894 | char *p = (char *)data; | |
1895 | unsigned int i, j; | |
1896 | ||
1897 | switch (stringset) { | |
1898 | case ETH_SS_STATS: | |
1899 | for (i = 0; i < vi->curr_queue_pairs; i++) { | |
1900 | for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { | |
1901 | snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s", | |
1902 | i, virtnet_rq_stats_desc[j].desc); | |
1903 | p += ETH_GSTRING_LEN; | |
1904 | } | |
1905 | } | |
1906 | ||
1907 | for (i = 0; i < vi->curr_queue_pairs; i++) { | |
1908 | for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { | |
1909 | snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%s", | |
1910 | i, virtnet_sq_stats_desc[j].desc); | |
1911 | p += ETH_GSTRING_LEN; | |
1912 | } | |
1913 | } | |
1914 | break; | |
1915 | } | |
1916 | } | |
1917 | ||
1918 | static int virtnet_get_sset_count(struct net_device *dev, int sset) | |
1919 | { | |
1920 | struct virtnet_info *vi = netdev_priv(dev); | |
1921 | ||
1922 | switch (sset) { | |
1923 | case ETH_SS_STATS: | |
1924 | return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN + | |
1925 | VIRTNET_SQ_STATS_LEN); | |
1926 | default: | |
1927 | return -EOPNOTSUPP; | |
1928 | } | |
1929 | } | |
1930 | ||
1931 | static void virtnet_get_ethtool_stats(struct net_device *dev, | |
1932 | struct ethtool_stats *stats, u64 *data) | |
1933 | { | |
1934 | struct virtnet_info *vi = netdev_priv(dev); | |
1935 | unsigned int idx = 0, start, i, j; | |
1936 | const u8 *stats_base; | |
1937 | size_t offset; | |
1938 | ||
1939 | for (i = 0; i < vi->curr_queue_pairs; i++) { | |
1940 | struct receive_queue *rq = &vi->rq[i]; | |
1941 | ||
1942 | stats_base = (u8 *)&rq->stats; | |
1943 | do { | |
1944 | start = u64_stats_fetch_begin_irq(&rq->stats.syncp); | |
1945 | for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { | |
1946 | offset = virtnet_rq_stats_desc[j].offset; | |
1947 | data[idx + j] = *(u64 *)(stats_base + offset); | |
1948 | } | |
1949 | } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); | |
1950 | idx += VIRTNET_RQ_STATS_LEN; | |
1951 | } | |
1952 | ||
1953 | for (i = 0; i < vi->curr_queue_pairs; i++) { | |
1954 | struct send_queue *sq = &vi->sq[i]; | |
1955 | ||
1956 | stats_base = (u8 *)&sq->stats; | |
1957 | do { | |
1958 | start = u64_stats_fetch_begin_irq(&sq->stats.syncp); | |
1959 | for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { | |
1960 | offset = virtnet_sq_stats_desc[j].offset; | |
1961 | data[idx + j] = *(u64 *)(stats_base + offset); | |
1962 | } | |
1963 | } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); | |
1964 | idx += VIRTNET_SQ_STATS_LEN; | |
1965 | } | |
1966 | } | |
1967 | ||
d73bcd2c JW |
1968 | static void virtnet_get_channels(struct net_device *dev, |
1969 | struct ethtool_channels *channels) | |
1970 | { | |
1971 | struct virtnet_info *vi = netdev_priv(dev); | |
1972 | ||
1973 | channels->combined_count = vi->curr_queue_pairs; | |
1974 | channels->max_combined = vi->max_queue_pairs; | |
1975 | channels->max_other = 0; | |
1976 | channels->rx_count = 0; | |
1977 | channels->tx_count = 0; | |
1978 | channels->other_count = 0; | |
1979 | } | |
1980 | ||
16032be5 | 1981 | /* Check if the user is trying to change anything besides speed/duplex */ |
ebb6b4b1 PR |
1982 | static bool |
1983 | virtnet_validate_ethtool_cmd(const struct ethtool_link_ksettings *cmd) | |
16032be5 | 1984 | { |
ebb6b4b1 PR |
1985 | struct ethtool_link_ksettings diff1 = *cmd; |
1986 | struct ethtool_link_ksettings diff2 = {}; | |
16032be5 | 1987 | |
0cf3ace9 NA |
1988 | /* cmd is always set, so we need to clear it; validate the port type,
1989 | * and since there is no autonegotiation we can ignore advertising.
1990 | */ | |
ebb6b4b1 PR |
1991 | diff1.base.speed = 0; |
1992 | diff2.base.port = PORT_OTHER; | |
1993 | ethtool_link_ksettings_zero_link_mode(&diff1, advertising); | |
1994 | diff1.base.duplex = 0; | |
1995 | diff1.base.cmd = 0; | |
1996 | diff1.base.link_mode_masks_nwords = 0; | |
1997 | ||
1998 | return !memcmp(&diff1.base, &diff2.base, sizeof(diff1.base)) && | |
1999 | bitmap_empty(diff1.link_modes.supported, | |
2000 | __ETHTOOL_LINK_MODE_MASK_NBITS) && | |
2001 | bitmap_empty(diff1.link_modes.advertising, | |
2002 | __ETHTOOL_LINK_MODE_MASK_NBITS) && | |
2003 | bitmap_empty(diff1.link_modes.lp_advertising, | |
2004 | __ETHTOOL_LINK_MODE_MASK_NBITS); | |
16032be5 NA |
2005 | } |
2006 | ||
ebb6b4b1 PR |
2007 | static int virtnet_set_link_ksettings(struct net_device *dev, |
2008 | const struct ethtool_link_ksettings *cmd) | |
16032be5 NA |
2009 | { |
2010 | struct virtnet_info *vi = netdev_priv(dev); | |
2011 | u32 speed; | |
2012 | ||
ebb6b4b1 | 2013 | speed = cmd->base.speed; |
16032be5 NA |
2014 | /* don't allow custom speed and duplex */ |
2015 | if (!ethtool_validate_speed(speed) || | |
ebb6b4b1 | 2016 | !ethtool_validate_duplex(cmd->base.duplex) || |
16032be5 NA |
2017 | !virtnet_validate_ethtool_cmd(cmd)) |
2018 | return -EINVAL; | |
2019 | vi->speed = speed; | |
ebb6b4b1 | 2020 | vi->duplex = cmd->base.duplex; |
16032be5 NA |
2021 | |
2022 | return 0; | |
2023 | } | |
2024 | ||
ebb6b4b1 PR |
2025 | static int virtnet_get_link_ksettings(struct net_device *dev, |
2026 | struct ethtool_link_ksettings *cmd) | |
16032be5 NA |
2027 | { |
2028 | struct virtnet_info *vi = netdev_priv(dev); | |
2029 | ||
ebb6b4b1 PR |
2030 | cmd->base.speed = vi->speed; |
2031 | cmd->base.duplex = vi->duplex; | |
2032 | cmd->base.port = PORT_OTHER; | |
16032be5 NA |
2033 | |
2034 | return 0; | |
2035 | } | |
2036 | ||
2037 | static void virtnet_init_settings(struct net_device *dev) | |
2038 | { | |
2039 | struct virtnet_info *vi = netdev_priv(dev); | |
2040 | ||
2041 | vi->speed = SPEED_UNKNOWN; | |
2042 | vi->duplex = DUPLEX_UNKNOWN; | |
2043 | } | |
2044 | ||
faa9b39f JB |
2045 | static void virtnet_update_settings(struct virtnet_info *vi) |
2046 | { | |
2047 | u32 speed; | |
2048 | u8 duplex; | |
2049 | ||
2050 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) | |
2051 | return; | |
2052 | ||
2053 | speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config, | |
2054 | speed)); | |
2055 | if (ethtool_validate_speed(speed)) | |
2056 | vi->speed = speed; | |
2057 | duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config, | |
2058 | duplex)); | |
2059 | if (ethtool_validate_duplex(duplex)) | |
2060 | vi->duplex = duplex; | |
2061 | } | |
2062 | ||
0fc0b732 | 2063 | static const struct ethtool_ops virtnet_ethtool_ops = { |
66846048 | 2064 | .get_drvinfo = virtnet_get_drvinfo, |
9f4d26d0 | 2065 | .get_link = ethtool_op_get_link, |
8f9f4668 | 2066 | .get_ringparam = virtnet_get_ringparam, |
d7dfc5cf TM |
2067 | .get_strings = virtnet_get_strings, |
2068 | .get_sset_count = virtnet_get_sset_count, | |
2069 | .get_ethtool_stats = virtnet_get_ethtool_stats, | |
d73bcd2c JW |
2070 | .set_channels = virtnet_set_channels, |
2071 | .get_channels = virtnet_get_channels, | |
074c3582 | 2072 | .get_ts_info = ethtool_op_get_ts_info, |
ebb6b4b1 PR |
2073 | .get_link_ksettings = virtnet_get_link_ksettings, |
2074 | .set_link_ksettings = virtnet_set_link_ksettings, | |
a9ea3fc6 HX |
2075 | }; |
2076 | ||
9fe7bfce JF |
2077 | static void virtnet_freeze_down(struct virtio_device *vdev) |
2078 | { | |
2079 | struct virtnet_info *vi = vdev->priv; | |
2080 | int i; | |
2081 | ||
2082 | /* Make sure no work handler is accessing the device */ | |
2083 | flush_work(&vi->config_work); | |
2084 | ||
2085 | netif_device_detach(vi->dev); | |
713a98d9 | 2086 | netif_tx_disable(vi->dev); |
9fe7bfce JF |
2087 | cancel_delayed_work_sync(&vi->refill); |
2088 | ||
2089 | if (netif_running(vi->dev)) { | |
b92f1e67 | 2090 | for (i = 0; i < vi->max_queue_pairs; i++) { |
9fe7bfce | 2091 | napi_disable(&vi->rq[i].napi); |
78a57b48 | 2092 | virtnet_napi_tx_disable(&vi->sq[i].napi); |
b92f1e67 | 2093 | } |
9fe7bfce JF |
2094 | } |
2095 | } | |
2096 | ||
2097 | static int init_vqs(struct virtnet_info *vi); | |
2098 | ||
2099 | static int virtnet_restore_up(struct virtio_device *vdev) | |
2100 | { | |
2101 | struct virtnet_info *vi = vdev->priv; | |
2102 | int err, i; | |
2103 | ||
2104 | err = init_vqs(vi); | |
2105 | if (err) | |
2106 | return err; | |
2107 | ||
2108 | virtio_device_ready(vdev); | |
2109 | ||
2110 | if (netif_running(vi->dev)) { | |
2111 | for (i = 0; i < vi->curr_queue_pairs; i++) | |
2112 | if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) | |
2113 | schedule_delayed_work(&vi->refill, 0); | |
2114 | ||
b92f1e67 | 2115 | for (i = 0; i < vi->max_queue_pairs; i++) { |
e4e8452a | 2116 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
b92f1e67 WB |
2117 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, |
2118 | &vi->sq[i].napi); | |
2119 | } | |
9fe7bfce JF |
2120 | } |
2121 | ||
2122 | netif_device_attach(vi->dev); | |
2123 | return err; | |
2124 | } | |
2125 | ||
3f93522f JW |
2126 | static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) |
2127 | { | |
2128 | struct scatterlist sg; | |
2129 | vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads); | |
2130 | ||
2131 | sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads)); | |
2132 | ||
2133 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, | |
2134 | VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { | |
2135 | dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
2136 | return -EINVAL; | |
2137 | } | |
2138 | ||
2139 | return 0; | |
2140 | } | |
2141 | ||
2142 | static int virtnet_clear_guest_offloads(struct virtnet_info *vi) | |
2143 | { | |
2144 | u64 offloads = 0; | |
2145 | ||
2146 | if (!vi->guest_offloads) | |
2147 | return 0; | |
2148 | ||
2149 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) | |
2150 | offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM; | |
2151 | ||
2152 | return virtnet_set_guest_offloads(vi, offloads); | |
2153 | } | |
2154 | ||
2155 | static int virtnet_restore_guest_offloads(struct virtnet_info *vi) | |
2156 | { | |
2157 | u64 offloads = vi->guest_offloads; | |
2158 | ||
2159 | if (!vi->guest_offloads) | |
2160 | return 0; | |
2161 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) | |
2162 | offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM; | |
2163 | ||
2164 | return virtnet_set_guest_offloads(vi, offloads); | |
2165 | } | |
2166 | ||
9861ce03 JK |
2167 | static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, |
2168 | struct netlink_ext_ack *extack) | |
f600b690 JF |
2169 | { |
2170 | unsigned long max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
2171 | struct virtnet_info *vi = netdev_priv(dev); | |
2172 | struct bpf_prog *old_prog; | |
017b29c3 | 2173 | u16 xdp_qp = 0, curr_qp; |
672aafd5 | 2174 | int i, err; |
f600b690 | 2175 | |
3f93522f JW |
2176 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
2177 | && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || | |
2178 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || | |
2179 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || | |
2180 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) { | |
4d463c4d | 2181 | NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO; disable LRO first");
f600b690 JF |
2182 | return -EOPNOTSUPP; |
2183 | } | |
2184 | ||
2185 | if (vi->mergeable_rx_bufs && !vi->any_header_sg) { | |
4d463c4d | 2186 | NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required"); |
f600b690 JF |
2187 | return -EINVAL; |
2188 | } | |
2189 | ||
2190 | if (dev->mtu > max_sz) { | |
4d463c4d | 2191 | NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP"); |
f600b690 JF |
2192 | netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz); |
2193 | return -EINVAL; | |
2194 | } | |
2195 | ||
672aafd5 JF |
2196 | curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; |
2197 | if (prog) | |
2198 | xdp_qp = nr_cpu_ids; | |
2199 | ||
2200 | /* XDP requires extra TX queues for XDP_TX: one per possible CPU, so transmits never contend on a queue */
2201 | if (curr_qp + xdp_qp > vi->max_queue_pairs) { | |
4d463c4d | 2202 | NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available"); |
672aafd5 JF |
2203 | netdev_warn(dev, "request %i queues but max is %i\n", |
2204 | curr_qp + xdp_qp, vi->max_queue_pairs); | |
2205 | return -ENOMEM; | |
2206 | } | |
2207 | ||
2de2f7f4 JF |
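	/* Take one prog reference per queue beyond the one the caller holds. */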
2208 | if (prog) { |
2209 | prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); | |
2210 | if (IS_ERR(prog)) | |
2211 | return PTR_ERR(prog); | |
2212 | } | |
2213 | ||
4941d472 | 2214 | /* Make sure NAPI is not using any XDP TX queues for RX. */ |
4e09ff53 JW |
2215 | if (netif_running(dev)) |
2216 | for (i = 0; i < vi->max_queue_pairs; i++) | |
2217 | napi_disable(&vi->rq[i].napi); | |
f600b690 | 2218 | |
672aafd5 | 2219 | netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); |
4941d472 JW |
2220 | err = _virtnet_set_queues(vi, curr_qp + xdp_qp); |
2221 | if (err) | |
2222 | goto err; | |
2223 | vi->xdp_queue_pairs = xdp_qp; | |
672aafd5 | 2224 | |
f600b690 JF |
2225 | for (i = 0; i < vi->max_queue_pairs; i++) { |
2226 | old_prog = rtnl_dereference(vi->rq[i].xdp_prog); | |
2227 | rcu_assign_pointer(vi->rq[i].xdp_prog, prog); | |
3f93522f JW |
2228 | if (i == 0) { |
2229 | if (!old_prog) | |
2230 | virtnet_clear_guest_offloads(vi); | |
2231 | if (!prog) | |
2232 | virtnet_restore_guest_offloads(vi); | |
2233 | } | |
f600b690 JF |
2234 | if (old_prog) |
2235 | bpf_prog_put(old_prog); | |
4e09ff53 JW |
2236 | if (netif_running(dev)) |
2237 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); | |
f600b690 JF |
2238 | } |
2239 | ||
2240 | return 0; | |
2de2f7f4 | 2241 | |
4941d472 JW |
2242 | err: |
2243 | for (i = 0; i < vi->max_queue_pairs; i++) | |
2244 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); | |
2de2f7f4 JF |
2245 | if (prog) |
2246 | bpf_prog_sub(prog, vi->max_queue_pairs - 1); | |
2247 | return err; | |
f600b690 JF |
2248 | } |
2249 | ||
5b0e6629 | 2250 | static u32 virtnet_xdp_query(struct net_device *dev) |
f600b690 JF |
2251 | { |
2252 | struct virtnet_info *vi = netdev_priv(dev); | |
5b0e6629 | 2253 | const struct bpf_prog *xdp_prog; |
f600b690 JF |
2254 | int i; |
2255 | ||
2256 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
5b0e6629 MKL |
2257 | xdp_prog = rtnl_dereference(vi->rq[i].xdp_prog); |
2258 | if (xdp_prog) | |
2259 | return xdp_prog->aux->id; | |
f600b690 | 2260 | } |
5b0e6629 | 2261 | return 0; |
f600b690 JF |
2262 | } |
2263 | ||
f4e63525 | 2264 | static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) |
f600b690 JF |
2265 | { |
2266 | switch (xdp->command) { | |
2267 | case XDP_SETUP_PROG: | |
9861ce03 | 2268 | return virtnet_xdp_set(dev, xdp->prog, xdp->extack); |
f600b690 | 2269 | case XDP_QUERY_PROG: |
5b0e6629 MKL |
2270 | xdp->prog_id = virtnet_xdp_query(dev); |
2271 | xdp->prog_attached = !!xdp->prog_id; | |
f600b690 JF |
2272 | return 0; |
2273 | default: | |
2274 | return -EINVAL; | |
2275 | } | |
2276 | } | |
2277 | ||
76288b4e SH |
2278 | static const struct net_device_ops virtnet_netdev = { |
2279 | .ndo_open = virtnet_open, | |
2280 | .ndo_stop = virtnet_close, | |
2281 | .ndo_start_xmit = start_xmit, | |
2282 | .ndo_validate_addr = eth_validate_addr, | |
9c46f6d4 | 2283 | .ndo_set_mac_address = virtnet_set_mac_address, |
2af7698e | 2284 | .ndo_set_rx_mode = virtnet_set_rx_mode, |
3fa2a1df | 2285 | .ndo_get_stats64 = virtnet_stats, |
1824a989 AW |
2286 | .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, |
2287 | .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, | |
76288b4e SH |
2288 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2289 | .ndo_poll_controller = virtnet_netpoll, | |
91815639 | 2290 | #endif |
f4e63525 | 2291 | .ndo_bpf = virtnet_xdp, |
186b3c99 JW |
2292 | .ndo_xdp_xmit = virtnet_xdp_xmit, |
2293 | .ndo_xdp_flush = virtnet_xdp_flush, | |
2836b4f2 | 2294 | .ndo_features_check = passthru_features_check, |
76288b4e SH |
2295 | }; |
2296 | ||
586d17c5 | 2297 | static void virtnet_config_changed_work(struct work_struct *work) |
9f4d26d0 | 2298 | { |
586d17c5 JW |
2299 | struct virtnet_info *vi = |
2300 | container_of(work, struct virtnet_info, config_work); | |
9f4d26d0 MM |
2301 | u16 v; |
2302 | ||
855e0c52 RR |
2303 | if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, |
2304 | struct virtio_net_config, status, &v) < 0) | |
507613bf | 2305 | return; |
586d17c5 JW |
2306 | |
2307 | if (v & VIRTIO_NET_S_ANNOUNCE) { | |
ee89bab1 | 2308 | netdev_notify_peers(vi->dev); |
586d17c5 JW |
2309 | virtnet_ack_link_announce(vi); |
2310 | } | |
9f4d26d0 MM |
2311 | |
2312 | /* Ignore unknown (future) status bits */ | |
2313 | v &= VIRTIO_NET_S_LINK_UP; | |
2314 | ||
2315 | if (vi->status == v) | |
507613bf | 2316 | return; |
9f4d26d0 MM |
2317 | |
2318 | vi->status = v; | |
2319 | ||
2320 | if (vi->status & VIRTIO_NET_S_LINK_UP) { | |
faa9b39f | 2321 | virtnet_update_settings(vi); |
9f4d26d0 | 2322 | netif_carrier_on(vi->dev); |
986a4f4d | 2323 | netif_tx_wake_all_queues(vi->dev); |
9f4d26d0 MM |
2324 | } else { |
2325 | netif_carrier_off(vi->dev); | |
986a4f4d | 2326 | netif_tx_stop_all_queues(vi->dev); |
9f4d26d0 MM |
2327 | } |
2328 | } | |
2329 | ||
2330 | static void virtnet_config_changed(struct virtio_device *vdev) | |
2331 | { | |
2332 | struct virtnet_info *vi = vdev->priv; | |
2333 | ||
3b07e9ca | 2334 | schedule_work(&vi->config_work); |
9f4d26d0 MM |
2335 | } |
2336 | ||
986a4f4d JW |
2337 | static void virtnet_free_queues(struct virtnet_info *vi) |
2338 | { | |
d4fb84ee AV |
2339 | int i; |
2340 | ||
ab3971b1 JW |
2341 | for (i = 0; i < vi->max_queue_pairs; i++) { |
2342 | napi_hash_del(&vi->rq[i].napi); | |
d4fb84ee | 2343 | netif_napi_del(&vi->rq[i].napi); |
b92f1e67 | 2344 | netif_napi_del(&vi->sq[i].napi); |
ab3971b1 | 2345 | } |
d4fb84ee | 2346 | |
963abe5c ED |
2347 | /* We called napi_hash_del() before netif_napi_del(), so
2348 | * we need to respect an RCU grace period before freeing vi->rq.
2349 | */ | |
2350 | synchronize_net(); | |
2351 | ||
986a4f4d JW |
2352 | kfree(vi->rq); |
2353 | kfree(vi->sq); | |
2354 | } | |
2355 | ||
47315329 | 2356 | static void _free_receive_bufs(struct virtnet_info *vi) |
986a4f4d | 2357 | { |
f600b690 | 2358 | struct bpf_prog *old_prog; |
986a4f4d JW |
2359 | int i; |
2360 | ||
2361 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
2362 | while (vi->rq[i].pages) | |
2363 | __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); | |
f600b690 JF |
2364 | |
2365 | old_prog = rtnl_dereference(vi->rq[i].xdp_prog); | |
2366 | RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); | |
2367 | if (old_prog) | |
2368 | bpf_prog_put(old_prog); | |
986a4f4d | 2369 | } |
47315329 JF |
2370 | } |
2371 | ||
2372 | static void free_receive_bufs(struct virtnet_info *vi) | |
2373 | { | |
2374 | rtnl_lock(); | |
2375 | _free_receive_bufs(vi); | |
f600b690 | 2376 | rtnl_unlock(); |
986a4f4d JW |
2377 | } |
2378 | ||
fb51879d MD |
2379 | static void free_receive_page_frags(struct virtnet_info *vi) |
2380 | { | |
2381 | int i; | |
2382 | for (i = 0; i < vi->max_queue_pairs; i++) | |
2383 | if (vi->rq[i].alloc_frag.page) | |
2384 | put_page(vi->rq[i].alloc_frag.page); | |
2385 | } | |
2386 | ||
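/* Within curr_queue_pairs, the last xdp_queue_pairs TX queues carry raw
 * XDP buffers (pages) rather than skbs.
 */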
b68df015 | 2387 | static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) |
56434a01 JF |
2388 | { |
2389 | if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) | |
2390 | return false; | |
2391 | else if (q < vi->curr_queue_pairs) | |
2392 | return true; | |
2393 | else | |
2394 | return false; | |
2395 | } | |
2396 | ||
986a4f4d JW |
2397 | static void free_unused_bufs(struct virtnet_info *vi) |
2398 | { | |
2399 | void *buf; | |
2400 | int i; | |
2401 | ||
2402 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
2403 | struct virtqueue *vq = vi->sq[i].vq; | |
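		/* Normal TX queues hold skbs; XDP TX queues hold raw pages. */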
56434a01 | 2404 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
b68df015 | 2405 | if (!is_xdp_raw_buffer_queue(vi, i)) |
56434a01 JF |
2406 | dev_kfree_skb(buf); |
2407 | else | |
2408 | put_page(virt_to_head_page(buf)); | |
2409 | } | |
986a4f4d JW |
2410 | } |
2411 | ||
2412 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
2413 | struct virtqueue *vq = vi->rq[i].vq; | |
2414 | ||
2415 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { | |
ab7db917 | 2416 | if (vi->mergeable_rx_bufs) { |
680557cf | 2417 | put_page(virt_to_head_page(buf)); |
ab7db917 | 2418 | } else if (vi->big_packets) { |
fa9fac17 | 2419 | give_pages(&vi->rq[i], buf); |
ab7db917 | 2420 | } else { |
f6b10209 | 2421 | put_page(virt_to_head_page(buf)); |
ab7db917 | 2422 | } |
986a4f4d | 2423 | } |
986a4f4d JW |
2424 | } |
2425 | } | |
2426 | ||
e9d7417b JW |
2427 | static void virtnet_del_vqs(struct virtnet_info *vi) |
2428 | { | |
2429 | struct virtio_device *vdev = vi->vdev; | |
2430 | ||
8898c21c | 2431 | virtnet_clean_affinity(vi, -1); |
986a4f4d | 2432 | |
e9d7417b | 2433 | vdev->config->del_vqs(vdev); |
986a4f4d JW |
2434 | |
2435 | virtnet_free_queues(vi); | |
e9d7417b JW |
2436 | } |
2437 | ||
d85b758f MT |
2438 | /* How large should a single buffer be so a queue full of these can fit at |
2439 | * least one full packet? | |
2440 | * Logic below assumes the mergeable buffer header is used. | |
2441 | */ | |
2442 | static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) | |
2443 | { | |
2444 | const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); | |
2445 | unsigned int rq_size = virtqueue_get_vring_size(vq); | |
2446 | unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; | |
2447 | unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; | |
2448 | unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); | |
2449 | ||
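	/* The inner max() keeps the hdr_len subtraction from underflowing;
	 * the outer max() never sizes buffers below GOOD_PACKET_LEN.
	 */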
f0c3192c MT |
2450 | return max(max(min_buf_len, hdr_len) - hdr_len, |
2451 | (unsigned int)GOOD_PACKET_LEN); | |
d85b758f MT |
2452 | } |
2453 | ||
986a4f4d | 2454 | static int virtnet_find_vqs(struct virtnet_info *vi) |
3f9c10b0 | 2455 | { |
986a4f4d JW |
2456 | vq_callback_t **callbacks; |
2457 | struct virtqueue **vqs; | |
2458 | int ret = -ENOMEM; | |
2459 | int i, total_vqs; | |
2460 | const char **names; | |
d45b897b | 2461 | bool *ctx; |
986a4f4d JW |
2462 | |
2463 | /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by | |
2464 | * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by | |
2465 | * possible control vq. | |
2466 | */ | |
2467 | total_vqs = vi->max_queue_pairs * 2 + | |
2468 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); | |
2469 | ||
2470 | /* Allocate space for find_vqs parameters */ | |
2471 | vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); | |
2472 | if (!vqs) | |
2473 | goto err_vq; | |
2474 | callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL); | |
2475 | if (!callbacks) | |
2476 | goto err_callback; | |
2477 | names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); | |
2478 | if (!names) | |
2479 | goto err_names; | |
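	/* Per-buffer context on RX is needed except in big-packets mode. */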
192f68cf | 2480 | if (!vi->big_packets || vi->mergeable_rx_bufs) { |
d45b897b MT |
2481 | ctx = kzalloc(total_vqs * sizeof(*ctx), GFP_KERNEL); |
2482 | if (!ctx) | |
2483 | goto err_ctx; | |
2484 | } else { | |
2485 | ctx = NULL; | |
2486 | } | |
986a4f4d JW |
2487 | |
2488 | /* Parameters for control virtqueue, if any */ | |
2489 | if (vi->has_cvq) { | |
2490 | callbacks[total_vqs - 1] = NULL; | |
2491 | names[total_vqs - 1] = "control"; | |
2492 | } | |
3f9c10b0 | 2493 | |
986a4f4d JW |
2494 | /* Allocate/initialize parameters for send/receive virtqueues */ |
2495 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
2496 | callbacks[rxq2vq(i)] = skb_recv_done; | |
2497 | callbacks[txq2vq(i)] = skb_xmit_done; | |
2498 | sprintf(vi->rq[i].name, "input.%d", i); | |
2499 | sprintf(vi->sq[i].name, "output.%d", i); | |
2500 | names[rxq2vq(i)] = vi->rq[i].name; | |
2501 | names[txq2vq(i)] = vi->sq[i].name; | |
d45b897b MT |
2502 | if (ctx) |
2503 | ctx[rxq2vq(i)] = true; | |
986a4f4d | 2504 | } |
3f9c10b0 | 2505 | |
986a4f4d | 2506 | ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, |
d45b897b | 2507 | names, ctx, NULL); |
986a4f4d JW |
2508 | if (ret) |
2509 | goto err_find; | |
3f9c10b0 | 2510 | |
986a4f4d JW |
2511 | if (vi->has_cvq) { |
2512 | vi->cvq = vqs[total_vqs - 1]; | |
3f9c10b0 | 2513 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) |
f646968f | 2514 | vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
3f9c10b0 | 2515 | } |
986a4f4d JW |
2516 | |
2517 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
2518 | vi->rq[i].vq = vqs[rxq2vq(i)]; | |
d85b758f | 2519 | vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); |
986a4f4d JW |
2520 | vi->sq[i].vq = vqs[txq2vq(i)]; |
2521 | } | |
2522 | ||
2523 | kfree(names); | |
2524 | kfree(callbacks); | |
2525 | kfree(vqs); | |
55281621 | 2526 | kfree(ctx); |
986a4f4d | 2527 | |
3f9c10b0 | 2528 | return 0; |
986a4f4d JW |
2529 | |
2530 | err_find: | |
d45b897b MT |
2531 | kfree(ctx); |
2532 | err_ctx: | |
986a4f4d JW |
2533 | kfree(names); |
2534 | err_names: | |
2535 | kfree(callbacks); | |
2536 | err_callback: | |
2537 | kfree(vqs); | |
2538 | err_vq: | |
2539 | return ret; | |
2540 | } | |
2541 | ||
2542 | static int virtnet_alloc_queues(struct virtnet_info *vi) | |
2543 | { | |
2544 | int i; | |
2545 | ||
2546 | vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); | |
2547 | if (!vi->sq) | |
2548 | goto err_sq; | |
2549 | vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); | |
008d4278 | 2550 | if (!vi->rq) |
986a4f4d JW |
2551 | goto err_rq; |
2552 | ||
2553 | INIT_DELAYED_WORK(&vi->refill, refill_work); | |
2554 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
2555 | vi->rq[i].pages = NULL; | |
2556 | netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, | |
2557 | napi_weight); | |
1d11e732 WB |
2558 | netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx, |
2559 | napi_tx ? napi_weight : 0); | |
986a4f4d JW |
2560 | |
2561 | sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); | |
5377d758 | 2562 | ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); |
986a4f4d | 2563 | sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); |
d7dfc5cf TM |
2564 | |
2565 | u64_stats_init(&vi->rq[i].stats.syncp); | |
2566 | u64_stats_init(&vi->sq[i].stats.syncp); | |
986a4f4d JW |
2567 | } |
2568 | ||
2569 | return 0; | |
2570 | ||
2571 | err_rq: | |
2572 | kfree(vi->sq); | |
2573 | err_sq: | |
2574 | return -ENOMEM; | |
2575 | } | |
2576 | ||
2577 | static int init_vqs(struct virtnet_info *vi) | |
2578 | { | |
2579 | int ret; | |
2580 | ||
2581 | /* Allocate send & receive queues */ | |
2582 | ret = virtnet_alloc_queues(vi); | |
2583 | if (ret) | |
2584 | goto err; | |
2585 | ||
2586 | ret = virtnet_find_vqs(vi); | |
2587 | if (ret) | |
2588 | goto err_free; | |
2589 | ||
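	/* Hold the cpu hotplug lock while distributing queues across cpus. */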
47be2479 | 2590 | get_online_cpus(); |
8898c21c | 2591 | virtnet_set_affinity(vi); |
47be2479 WG |
2592 | put_online_cpus(); |
2593 | ||
986a4f4d JW |
2594 | return 0; |
2595 | ||
2596 | err_free: | |
2597 | virtnet_free_queues(vi); | |
2598 | err: | |
2599 | return ret; | |
3f9c10b0 AS |
2600 | } |
2601 | ||
fbf28d78 MD |
2602 | #ifdef CONFIG_SYSFS |
2603 | static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, | |
718ad681 | 2604 | char *buf) |
fbf28d78 MD |
2605 | { |
2606 | struct virtnet_info *vi = netdev_priv(queue->dev); | |
2607 | unsigned int queue_index = get_netdev_rx_queue_index(queue); | |
3cc81a9a JW |
2608 | unsigned int headroom = virtnet_get_headroom(vi); |
2609 | unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; | |
5377d758 | 2610 | struct ewma_pkt_len *avg; |
fbf28d78 MD |
2611 | |
2612 | BUG_ON(queue_index >= vi->max_queue_pairs); | |
2613 | avg = &vi->rq[queue_index].mrg_avg_pkt_len; | |
d85b758f | 2614 | return sprintf(buf, "%u\n", |
3cc81a9a JW |
2615 | get_mergeable_buf_len(&vi->rq[queue_index], avg, |
2616 | SKB_DATA_ALIGN(headroom + tailroom))); | |
fbf28d78 MD |
2617 | } |
2618 | ||
2619 | static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = | |
2620 | __ATTR_RO(mergeable_rx_buffer_size); | |
2621 | ||
2622 | static struct attribute *virtio_net_mrg_rx_attrs[] = { | |
2623 | &mergeable_rx_buffer_size_attribute.attr, | |
2624 | NULL | |
2625 | }; | |
2626 | ||
2627 | static const struct attribute_group virtio_net_mrg_rx_group = { | |
2628 | .name = "virtio_net", | |
2629 | .attrs = virtio_net_mrg_rx_attrs | |
2630 | }; | |
2631 | #endif | |
2632 | ||
892d6eb1 JW |
2633 | static bool virtnet_fail_on_feature(struct virtio_device *vdev, |
2634 | unsigned int fbit, | |
2635 | const char *fname, const char *dname) | |
2636 | { | |
2637 | if (!virtio_has_feature(vdev, fbit)) | |
2638 | return false; | |
2639 | ||
2640 | dev_err(&vdev->dev, "device advertises feature %s but not %s", | |
2641 | fname, dname); | |
2642 | ||
2643 | return true; | |
2644 | } | |
2645 | ||
2646 | #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ | |
2647 | virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) | |
2648 | ||
2649 | static bool virtnet_validate_features(struct virtio_device *vdev) | |
2650 | { | |
2651 | if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && | |
2652 | (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, | |
2653 | "VIRTIO_NET_F_CTRL_VQ") || | |
2654 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, | |
2655 | "VIRTIO_NET_F_CTRL_VQ") || | |
2656 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, | |
2657 | "VIRTIO_NET_F_CTRL_VQ") || | |
2658 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || | |
2659 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, | |
2660 | "VIRTIO_NET_F_CTRL_VQ"))) { | |
2661 | return false; | |
2662 | } | |
2663 | ||
2664 | return true; | |
2665 | } | |
2666 | ||
d0c2c997 JW |
2667 | #define MIN_MTU ETH_MIN_MTU |
2668 | #define MAX_MTU ETH_MAX_MTU | |
2669 | ||
fe36cbe0 | 2670 | static int virtnet_validate(struct virtio_device *vdev) |
296f96fc | 2671 | { |
6ba42248 MT |
2672 | if (!vdev->config->get) { |
2673 | dev_err(&vdev->dev, "%s failure: config access disabled\n", | |
2674 | __func__); | |
2675 | return -EINVAL; | |
2676 | } | |
2677 | ||
892d6eb1 JW |
2678 | if (!virtnet_validate_features(vdev)) |
2679 | return -EINVAL; | |
2680 | ||
fe36cbe0 MT |
2681 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { |
2682 | int mtu = virtio_cread16(vdev, | |
2683 | offsetof(struct virtio_net_config, | |
2684 | mtu)); | |
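		/* A bad device MTU is not fatal: mask the feature and fall
		 * back to the driver's default sizing.
		 */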
2685 | if (mtu < MIN_MTU) | |
2686 | __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); | |
2687 | } | |
2688 | ||
2689 | return 0; | |
2690 | } | |
2691 | ||
2692 | static int virtnet_probe(struct virtio_device *vdev) | |
2693 | { | |
d7dfc5cf | 2694 | int i, err = -ENOMEM; |
fe36cbe0 MT |
2695 | struct net_device *dev; |
2696 | struct virtnet_info *vi; | |
2697 | u16 max_queue_pairs; | |
2698 | int mtu; | |
2699 | ||
986a4f4d | 2700 | /* Find out whether the host supports a multiqueue virtio_net device */
855e0c52 RR |
2701 | err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, |
2702 | struct virtio_net_config, | |
2703 | max_virtqueue_pairs, &max_queue_pairs); | |
986a4f4d JW |
2704 | |
2705 | /* We need at least 2 queues */
2706 | if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || | |
2707 | max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || | |
2708 | !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) | |
2709 | max_queue_pairs = 1; | |
296f96fc RR |
2710 | |
2711 | /* Allocate ourselves a network device with room for our info */ | |
986a4f4d | 2712 | dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); |
296f96fc RR |
2713 | if (!dev) |
2714 | return -ENOMEM; | |
2715 | ||
2716 | /* Set up network device as normal. */ | |
f2f2c8b4 | 2717 | dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; |
76288b4e | 2718 | dev->netdev_ops = &virtnet_netdev; |
296f96fc | 2719 | dev->features = NETIF_F_HIGHDMA; |
3fa2a1df | 2720 | |
7ad24ea4 | 2721 | dev->ethtool_ops = &virtnet_ethtool_ops; |
296f96fc RR |
2722 | SET_NETDEV_DEV(dev, &vdev->dev); |
2723 | ||
2724 | /* Do we support "hardware" checksums? */ | |
98e778c9 | 2725 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { |
296f96fc | 2726 | /* This opens up the world of extra features. */ |
48900cb6 | 2727 | dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; |
98e778c9 | 2728 | if (csum) |
48900cb6 | 2729 | dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; |
98e778c9 MM |
2730 | |
2731 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { | |
e078de03 | 2732 | dev->hw_features |= NETIF_F_TSO |
34a48579 RR |
2733 | | NETIF_F_TSO_ECN | NETIF_F_TSO6; |
2734 | } | |
5539ae96 | 2735 | /* Individual feature bits: what can the host handle? */
98e778c9 MM |
2736 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) |
2737 | dev->hw_features |= NETIF_F_TSO; | |
2738 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) | |
2739 | dev->hw_features |= NETIF_F_TSO6; | |
2740 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) | |
2741 | dev->hw_features |= NETIF_F_TSO_ECN; | |
98e778c9 | 2742 | |
41f2f127 JW |
2743 | dev->features |= NETIF_F_GSO_ROBUST; |
2744 | ||
98e778c9 | 2745 | if (gso) |
e078de03 | 2746 | dev->features |= dev->hw_features & NETIF_F_ALL_TSO; |
98e778c9 | 2747 | /* (!csum && gso) case will be fixed by register_netdev() */ |
296f96fc | 2748 | } |
4f49129b TH |
2749 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) |
2750 | dev->features |= NETIF_F_RXCSUM; | |
296f96fc | 2751 | |
4fda8302 JW |
2752 | dev->vlan_features = dev->features; |
2753 | ||
d0c2c997 JW |
2754 | /* MTU range: 68 - 65535 */ |
2755 | dev->min_mtu = MIN_MTU; | |
2756 | dev->max_mtu = MAX_MTU; | |
2757 | ||
296f96fc | 2758 | /* Configuration may specify what MAC to use. Otherwise random. */ |
855e0c52 RR |
2759 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) |
2760 | virtio_cread_bytes(vdev, | |
2761 | offsetof(struct virtio_net_config, mac), | |
2762 | dev->dev_addr, dev->addr_len); | |
2763 | else | |
f2cedb63 | 2764 | eth_hw_addr_random(dev); |
296f96fc RR |
2765 | |
2766 | /* Set up our device-specific information */ | |
2767 | vi = netdev_priv(dev); | |
296f96fc RR |
2768 | vi->dev = dev; |
2769 | vi->vdev = vdev; | |
d9d5dcc8 | 2770 | vdev->priv = vi; |
827da44c | 2771 | |
586d17c5 | 2772 | INIT_WORK(&vi->config_work, virtnet_config_changed_work); |
296f96fc | 2773 | |
97402b96 | 2774 | /* If we can receive ANY GSO packets, we must allocate large ones. */ |
8e95a202 JP |
2775 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || |
2776 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || | |
e3e3c423 VY |
2777 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || |
2778 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) | |
97402b96 HX |
2779 | vi->big_packets = true; |
2780 | ||
3f2c31d9 MM |
2781 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) |
2782 | vi->mergeable_rx_bufs = true; | |
2783 | ||
d04302b3 MT |
2784 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) || |
2785 | virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) | |
012873d0 MT |
2786 | vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
2787 | else | |
2788 | vi->hdr_len = sizeof(struct virtio_net_hdr); | |
2789 | ||
75993300 MT |
2790 | if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || |
2791 | virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) | |
e7428e95 MT |
2792 | vi->any_header_sg = true; |
2793 | ||
986a4f4d JW |
2794 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) |
2795 | vi->has_cvq = true; | |
2796 | ||
14de9d11 AC |
2797 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { |
2798 | mtu = virtio_cread16(vdev, | |
2799 | offsetof(struct virtio_net_config, | |
2800 | mtu)); | |
93a205ee | 2801 | if (mtu < dev->min_mtu) { |
fe36cbe0 MT |
2802 | /* Should never trigger: MTU was previously validated |
2803 | * in virtnet_validate. | |
2804 | */ | |
2805 | dev_err(&vdev->dev, "device MTU appears to have changed: "
2806 | "it is now %d < %d", mtu, dev->min_mtu); | |
d7dfc5cf | 2807 | goto free; |
93a205ee | 2808 | } |
2e123b44 | 2809 | |
fe36cbe0 MT |
2810 | dev->mtu = mtu; |
2811 | dev->max_mtu = mtu; | |
2812 | ||
2e123b44 MT |
2813 | /* TODO: size buffers correctly in this case. */ |
2814 | if (dev->mtu > ETH_DATA_LEN) | |
2815 | vi->big_packets = true; | |
14de9d11 AC |
2816 | } |
2817 | ||
012873d0 MT |
2818 | if (vi->any_header_sg) |
2819 | dev->needed_headroom = vi->hdr_len; | |
6ebbc1a6 | 2820 | |
44900010 JW |
2821 | /* Enable multiqueue by default */ |
2822 | if (num_online_cpus() >= max_queue_pairs) | |
2823 | vi->curr_queue_pairs = max_queue_pairs; | |
2824 | else | |
2825 | vi->curr_queue_pairs = num_online_cpus(); | |
986a4f4d JW |
2826 | vi->max_queue_pairs = max_queue_pairs; |
2827 | ||
2828 | /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ | |
3f9c10b0 | 2829 | err = init_vqs(vi); |
d2a7ddda | 2830 | if (err) |
d7dfc5cf | 2831 | goto free; |
296f96fc | 2832 | |
fbf28d78 MD |
2833 | #ifdef CONFIG_SYSFS |
2834 | if (vi->mergeable_rx_bufs) | |
2835 | dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group; | |
2836 | #endif | |
0f13b66b ZYW |
2837 | netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); |
2838 | netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); | |
986a4f4d | 2839 | |
16032be5 NA |
2840 | virtnet_init_settings(dev); |
2841 | ||
296f96fc RR |
2842 | err = register_netdev(dev); |
2843 | if (err) { | |
2844 | pr_debug("virtio_net: registering device failed\n"); | |
d2a7ddda | 2845 | goto free_vqs; |
296f96fc | 2846 | } |
b3369c1f | 2847 | |
4baf1e33 MT |
2848 | virtio_device_ready(vdev); |
2849 | ||
8017c279 | 2850 | err = virtnet_cpu_notif_add(vi); |
8de4b2f3 WG |
2851 | if (err) { |
2852 | pr_debug("virtio_net: registering cpu notifier failed\n"); | |
f00e35e2 | 2853 | goto free_unregister_netdev; |
8de4b2f3 WG |
2854 | } |
2855 | ||
a220871b | 2856 | virtnet_set_queues(vi, vi->curr_queue_pairs); |
44900010 | 2857 | |
167c25e4 JW |
2858 | /* Assume link up if device can't report link status, |
2859 |  * otherwise get link status from config. */
bda7fab5 | 2860 | netif_carrier_off(dev); |
167c25e4 | 2861 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { |
3b07e9ca | 2862 | schedule_work(&vi->config_work); |
167c25e4 JW |
2863 | } else { |
2864 | vi->status = VIRTIO_NET_S_LINK_UP; | |
faa9b39f | 2865 | virtnet_update_settings(vi); |
167c25e4 JW |
2866 | netif_carrier_on(dev); |
2867 | } | |
9f4d26d0 | 2868 | |
3f93522f JW |
2869 | for (i = 0; i < ARRAY_SIZE(guest_offloads); i++) |
2870 | if (virtio_has_feature(vi->vdev, guest_offloads[i])) | |
2871 | set_bit(guest_offloads[i], &vi->guest_offloads); | |
2872 | ||
986a4f4d JW |
2873 | pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", |
2874 | dev->name, max_queue_pairs); | |
2875 | ||
296f96fc RR |
2876 | return 0; |
2877 | ||
f00e35e2 | 2878 | free_unregister_netdev: |
02465555 MT |
2879 | vi->vdev->config->reset(vdev); |
2880 | ||
b3369c1f | 2881 | unregister_netdev(dev); |
d2a7ddda | 2882 | free_vqs: |
986a4f4d | 2883 | cancel_delayed_work_sync(&vi->refill); |
fb51879d | 2884 | free_receive_page_frags(vi); |
e9d7417b | 2885 | virtnet_del_vqs(vi); |
296f96fc RR |
2886 | free: |
2887 | free_netdev(dev); | |
2888 | return err; | |
2889 | } | |
2890 | ||
04486ed0 | 2891 | static void remove_vq_common(struct virtnet_info *vi) |
296f96fc | 2892 | { |
04486ed0 | 2893 | vi->vdev->config->reset(vi->vdev); |
830a8a97 SM |
2894 | |
2895 | /* Free unused buffers in both send and recv, if any. */ | |
9ab86bbc | 2896 | free_unused_bufs(vi); |
fb6813f4 | 2897 | |
986a4f4d | 2898 | free_receive_bufs(vi); |
d2a7ddda | 2899 | |
fb51879d MD |
2900 | free_receive_page_frags(vi); |
2901 | ||
986a4f4d | 2902 | virtnet_del_vqs(vi); |
04486ed0 AS |
2903 | } |
2904 | ||
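/* Teardown order matters here: reset() silences the device so no vq
 * callbacks can fire, buffers still sitting in the rings are reclaimed
 * next, and the vqs are deleted last. The reclaim follows the pattern
 * used by free_unused_bufs() (earlier in this file), roughly:
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		put_page(virt_to_head_page(buf));  // rx, mergeable/big modes
 */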
8cc085d6 | 2905 | static void virtnet_remove(struct virtio_device *vdev) |
04486ed0 AS |
2906 | { |
2907 | struct virtnet_info *vi = vdev->priv; | |
2908 | ||
8017c279 | 2909 | virtnet_cpu_notif_remove(vi); |
8de4b2f3 | 2910 | |
102a2786 MT |
2911 | /* Make sure no work handler is accessing the device. */ |
2912 | flush_work(&vi->config_work); | |
586d17c5 | 2913 | |
04486ed0 AS |
2914 | unregister_netdev(vi->dev); |
2915 | ||
2916 | remove_vq_common(vi); | |
fb6813f4 | 2917 | |
74b2553f | 2918 | free_netdev(vi->dev); |
296f96fc RR |
2919 | } |
2920 | ||
67a75194 | 2921 | static __maybe_unused int virtnet_freeze(struct virtio_device *vdev) |
0741bcb5 AS |
2922 | { |
2923 | struct virtnet_info *vi = vdev->priv; | |
2924 | ||
8017c279 | 2925 | virtnet_cpu_notif_remove(vi); |
9fe7bfce | 2926 | virtnet_freeze_down(vdev); |
0741bcb5 AS |
2927 | remove_vq_common(vi); |
2928 | ||
2929 | return 0; | |
2930 | } | |
2931 | ||
67a75194 | 2932 | static __maybe_unused int virtnet_restore(struct virtio_device *vdev) |
0741bcb5 AS |
2933 | { |
2934 | struct virtnet_info *vi = vdev->priv; | |
9fe7bfce | 2935 | int err; |
0741bcb5 | 2936 | |
9fe7bfce | 2937 | err = virtnet_restore_up(vdev); |
0741bcb5 AS |
2938 | if (err) |
2939 | return err; | |
986a4f4d JW |
2940 | virtnet_set_queues(vi, vi->curr_queue_pairs); |
2941 | ||
8017c279 | 2942 | err = virtnet_cpu_notif_add(vi); |
ec9debbd JW |
2943 | if (err) |
2944 | return err; | |
2945 | ||
0741bcb5 AS |
2946 | return 0; |
2947 | } | |
0741bcb5 | 2948 | |
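/* freeze/restore mirror remove/probe: virtnet_freeze_down() quiesces NAPI
 * and detaches the netdev before remove_vq_common() tears the rings down,
 * and virtnet_restore_up() rebuilds them. A sketch of the restore side,
 * assuming the helpers named earlier in this file:
 *
 *	err = init_vqs(vi);
 *	if (err)
 *		return err;
 *	virtio_device_ready(vdev);
 *	if (netif_running(vi->dev))
 *		(refill the RX rings and re-enable NAPI)
 *	netif_device_attach(vi->dev);
 */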
296f96fc RR |
2949 | static struct virtio_device_id id_table[] = { |
2950 | { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, | |
2951 | { 0 }, | |
2952 | }; | |
2953 | ||
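/* VIRTIO_DEV_ANY_ID in the vendor slot binds the driver to any device
 * exposing the virtio-net device ID regardless of vendor; the
 * MODULE_DEVICE_TABLE() at the bottom of the file exports this same table
 * so module autoloading can match on it.
 */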
f3358507 MT |
2954 | #define VIRTNET_FEATURES \ |
2955 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \ | |
2956 | VIRTIO_NET_F_MAC, \ | |
2957 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \ | |
2958 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \ | |
2959 | VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \ | |
2960 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \ | |
2961 | VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ | |
2962 | VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ | |
2963 | VIRTIO_NET_F_CTRL_MAC_ADDR, \ | |
faa9b39f JB |
2964 | VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \ |
2965 | VIRTIO_NET_F_SPEED_DUPLEX | |
f3358507 | 2966 | |
c45a6816 | 2967 | static unsigned int features[] = { |
f3358507 MT |
2968 | VIRTNET_FEATURES, |
2969 | }; | |
2970 | ||
2971 | static unsigned int features_legacy[] = { | |
2972 | VIRTNET_FEATURES, | |
2973 | VIRTIO_NET_F_GSO, | |
e7428e95 | 2974 | VIRTIO_F_ANY_LAYOUT, |
c45a6816 RR |
2975 | }; |
2976 | ||
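/* The legacy table exists because virtio 1.0 ("modern") devices imply
 * VIRTIO_F_ANY_LAYOUT and deprecate the combined VIRTIO_NET_F_GSO bit, so
 * those two features are only negotiated with pre-1.0 transports.
 */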
22402529 | 2977 | static struct virtio_driver virtio_net_driver = { |
c45a6816 RR |
2978 | .feature_table = features, |
2979 | .feature_table_size = ARRAY_SIZE(features), | |
f3358507 MT |
2980 | .feature_table_legacy = features_legacy, |
2981 | .feature_table_size_legacy = ARRAY_SIZE(features_legacy), | |
296f96fc RR |
2982 | .driver.name = KBUILD_MODNAME, |
2983 | .driver.owner = THIS_MODULE, | |
2984 | .id_table = id_table, | |
fe36cbe0 | 2985 | .validate = virtnet_validate, |
296f96fc | 2986 | .probe = virtnet_probe, |
8cc085d6 | 2987 | .remove = virtnet_remove, |
9f4d26d0 | 2988 | .config_changed = virtnet_config_changed, |
89107000 | 2989 | #ifdef CONFIG_PM_SLEEP |
0741bcb5 AS |
2990 | .freeze = virtnet_freeze, |
2991 | .restore = virtnet_restore, | |
2992 | #endif | |
296f96fc RR |
2993 | }; |
2994 | ||
8017c279 SAS |
2995 | static __init int virtio_net_driver_init(void) |
2996 | { | |
2997 | int ret; | |
2998 | ||
73c1b41e | 2999 | ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online", |
8017c279 SAS |
3000 | virtnet_cpu_online, |
3001 | virtnet_cpu_down_prep); | |
3002 | if (ret < 0) | |
3003 | goto out; | |
3004 | virtionet_online = ret; | |
73c1b41e | 3005 | ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead", |
8017c279 SAS |
3006 | NULL, virtnet_cpu_dead); |
3007 | if (ret) | |
3008 | goto err_dead; | |
3009 | ||
3010 | ret = register_virtio_driver(&virtio_net_driver); | |
3011 | if (ret) | |
3012 | goto err_virtio; | |
3013 | return 0; | |
3014 | err_virtio: | |
3015 | cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); | |
3016 | err_dead: | |
3017 | cpuhp_remove_multi_state(virtionet_online); | |
3018 | out: | |
3019 | return ret; | |
3020 | } | |
3021 | module_init(virtio_net_driver_init); | |
3022 | ||
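/* Both cpuhp states above are registered as multi-instance: each probed
 * device attaches itself later via virtnet_cpu_notif_add() (earlier in
 * this file), roughly:
 *
 *	ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
 *	if (!ret)
 *		ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
 *						       &vi->node_dead);
 */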
3023 | static __exit void virtio_net_driver_exit(void) | |
3024 | { | |
cfa0ebc9 | 3025 | unregister_virtio_driver(&virtio_net_driver); |
8017c279 SAS |
3026 | cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); |
3027 | cpuhp_remove_multi_state(virtionet_online); | |
8017c279 SAS |
3028 | } |
3029 | module_exit(virtio_net_driver_exit); | |
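/* The exit path reverses init: the driver is unregistered before the two
 * cpuhp states are removed, so no in-flight probe can attach an instance
 * to a state that is going away.
 */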
296f96fc RR |
3030 | |
3031 | MODULE_DEVICE_TABLE(virtio, id_table); | |
3032 | MODULE_DESCRIPTION("Virtio network driver"); | |
3033 | MODULE_LICENSE("GPL"); |