xen: Add RING_COPY_REQUEST()
drivers/net/xen-netback/netback.c
1/*
2 * Back-end of the driver for virtual network devices. This portion of the
3 * driver exports a 'unified' network-device interface that can be accessed
4 * by any operating system that implements a compatible front end. A
5 * reference front-end implementation can be found in:
6 * drivers/net/xen-netfront.c
7 *
8 * Copyright (c) 2002-2005, K A Fraser
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation; or, when distributed
13 * separately from the Linux kernel or incorporated into other
14 * software packages, subject to the following license:
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a copy
17 * of this source file (the "Software"), to deal in the Software without
18 * restriction, including without limitation the rights to use, copy, modify,
19 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20 * and to permit persons to whom the Software is furnished to do so, subject to
21 * the following conditions:
22 *
23 * The above copyright notice and this permission notice shall be included in
24 * all copies or substantial portions of the Software.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 * IN THE SOFTWARE.
33 */
34
35#include "common.h"
36
37#include <linux/kthread.h>
38#include <linux/if_vlan.h>
39#include <linux/udp.h>
40#include <linux/highmem.h>
41
42#include <net/tcp.h>
43
44#include <xen/xen.h>
45#include <xen/events.h>
46#include <xen/interface/memory.h>
47#include <xen/page.h>
48
49#include <asm/xen/hypercall.h>
50
51/* Provide an option to disable split event channels at load time as
52 * event channels are limited resource. Split event channels are
53 * enabled by default.
54 */
55bool separate_tx_rx_irq = true;
56module_param(separate_tx_rx_irq, bool, 0644);
57
58/* The time that packets can stay on the guest Rx internal queue
59 * before they are dropped.
60 */
61unsigned int rx_drain_timeout_msecs = 10000;
62module_param(rx_drain_timeout_msecs, uint, 0444);
63
64/* The length of time before the frontend is considered unresponsive
65 * because it isn't providing Rx slots.
66 */
67unsigned int rx_stall_timeout_msecs = 60000;
68module_param(rx_stall_timeout_msecs, uint, 0444);
69
70unsigned int xenvif_max_queues;
71module_param_named(max_queues, xenvif_max_queues, uint, 0644);
72MODULE_PARM_DESC(max_queues,
73 "Maximum number of queues per virtual interface");
74
75/*
76 * This is the maximum slots a skb can have. If a guest sends a skb
77 * which exceeds this limit it is considered malicious.
78 */
79#define FATAL_SKB_SLOTS_DEFAULT 20
80static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
81module_param(fatal_skb_slots, uint, 0444);
82
83/* The amount to copy out of the first guest Tx slot into the skb's
84 * linear area. If the first slot has more data, it will be mapped
85 * and put into the first frag.
86 *
87 * This is sized to avoid pulling headers from the frags for most
88 * TCP/IP packets.
89 */
90#define XEN_NETBACK_TX_COPY_LEN 128
91
92
93static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
94 u8 status);
95
96static void make_tx_response(struct xenvif_queue *queue,
97 struct xen_netif_tx_request *txp,
98 s8 st);
99static void push_tx_responses(struct xenvif_queue *queue);
100
101static inline int tx_work_todo(struct xenvif_queue *queue);
102
103static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
104 u16 id,
105 s8 st,
106 u16 offset,
107 u16 size,
108 u16 flags);
109
110static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
111 u16 idx)
112{
113 return page_to_pfn(queue->mmap_pages[idx]);
114}
115
116static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
117 u16 idx)
118{
119 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
120}
121
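/* Shorthand for the zerocopy callback bookkeeping embedded in the
 * pending_tx_info entry of a given pending index.
 */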
122#define callback_param(vif, pending_idx) \
123 (vif->pending_tx_info[pending_idx].callback_struct)
124
125/* Find the containing VIF's structure from a pointer in pending_tx_info array
126 */
127static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
128{
129 u16 pending_idx = ubuf->desc;
130 struct pending_tx_info *temp =
131 container_of(ubuf, struct pending_tx_info, callback_struct);
132 return container_of(temp - pending_idx,
133 struct xenvif_queue,
134 pending_tx_info[0]);
135}
136
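/* While a slot is in flight, the frag's page_offset field is borrowed to
 * carry the pending ring index; xenvif_fill_frags() later overwrites it
 * with the real offset via __skb_fill_page_desc().
 */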
137static u16 frag_get_pending_idx(skb_frag_t *frag)
138{
139 return (u16)frag->page_offset;
140}
141
142static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
143{
144 frag->page_offset = pending_idx;
145}
146
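/* The pending ring is a power-of-two sized circular buffer, so masking
 * with (MAX_PENDING_REQS - 1) wraps the index.
 */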
147static inline pending_ring_idx_t pending_index(unsigned i)
148{
149 return i & (MAX_PENDING_REQS-1);
150}
151
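/* Worst-case number of rx ring slots a single packet can consume: one per
 * XEN_PAGE_SIZE chunk, plus one for the GSO extra-info slot if GSO is in use.
 */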
152static int xenvif_rx_ring_slots_needed(struct xenvif *vif)
153{
154 if (vif->gso_mask)
155 return DIV_ROUND_UP(vif->dev->gso_max_size, XEN_PAGE_SIZE) + 1;
156 else
157 return DIV_ROUND_UP(vif->dev->mtu, XEN_PAGE_SIZE);
158}
159
160static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
161{
162 RING_IDX prod, cons;
163 int needed;
164
165 needed = xenvif_rx_ring_slots_needed(queue->vif);
166
167 do {
168 prod = queue->rx.sring->req_prod;
169 cons = queue->rx.req_cons;
170
171 if (prod - cons >= needed)
172 return true;
173
174 queue->rx.sring->req_event = prod + 1;
175
176 /* Make sure event is visible before we check prod
177 * again.
178 */
179 mb();
180 } while (queue->rx.sring->req_prod != prod);
181
182 return false;
183}
184
185void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
186{
187 unsigned long flags;
188
189 spin_lock_irqsave(&queue->rx_queue.lock, flags);
190
191 __skb_queue_tail(&queue->rx_queue, skb);
192
193 queue->rx_queue_len += skb->len;
194 if (queue->rx_queue_len > queue->rx_queue_max)
195 netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
196
197 spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
198}
199
200static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
201{
202 struct sk_buff *skb;
203
204 spin_lock_irq(&queue->rx_queue.lock);
205
206 skb = __skb_dequeue(&queue->rx_queue);
207 if (skb)
208 queue->rx_queue_len -= skb->len;
209
210 spin_unlock_irq(&queue->rx_queue.lock);
211
212 return skb;
213}
214
215static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
216{
217 spin_lock_irq(&queue->rx_queue.lock);
218
219 if (queue->rx_queue_len < queue->rx_queue_max)
220 netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
221
222 spin_unlock_irq(&queue->rx_queue.lock);
223}
224
225
226static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
227{
228 struct sk_buff *skb;
229 while ((skb = xenvif_rx_dequeue(queue)) != NULL)
230 kfree_skb(skb);
231}
232
233static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
234{
235 struct sk_buff *skb;
236
237 for(;;) {
238 skb = skb_peek(&queue->rx_queue);
239 if (!skb)
240 break;
241 if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
242 break;
243 xenvif_rx_dequeue(queue);
244 kfree_skb(skb);
245 }
246}
247
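/* Book-keeping for a batch of guest rx work: producer/consumer positions in
 * the grant-copy and meta arrays, plus the offset and grant ref of the rx
 * buffer currently being filled.
 */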
248struct netrx_pending_operations {
249 unsigned copy_prod, copy_cons;
250 unsigned meta_prod, meta_cons;
251 struct gnttab_copy *copy;
252 struct xenvif_rx_meta *meta;
253 int copy_off;
254 grant_ref_t copy_gref;
255};
256
257static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
258 struct netrx_pending_operations *npo)
259{
260 struct xenvif_rx_meta *meta;
261 struct xen_netif_rx_request *req;
262
263 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
264
265 meta = npo->meta + npo->meta_prod++;
266 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
267 meta->gso_size = 0;
268 meta->size = 0;
269 meta->id = req->id;
270
271 npo->copy_off = 0;
272 npo->copy_gref = req->gref;
273
274 return meta;
275}
276
277struct gop_frag_copy {
278 struct xenvif_queue *queue;
279 struct netrx_pending_operations *npo;
280 struct xenvif_rx_meta *meta;
281 int head;
282 int gso_type;
283
284 struct page *page;
285};
286
287static void xenvif_setup_copy_gop(unsigned long gfn,
288 unsigned int offset,
289 unsigned int *len,
290 struct gop_frag_copy *info)
291{
292 struct gnttab_copy *copy_gop;
293 struct xen_page_foreign *foreign;
294 /* Convenient aliases */
295 struct xenvif_queue *queue = info->queue;
296 struct netrx_pending_operations *npo = info->npo;
297 struct page *page = info->page;
298
299 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
300
301 if (npo->copy_off == MAX_BUFFER_OFFSET)
302 info->meta = get_next_rx_buffer(queue, npo);
303
304 if (npo->copy_off + *len > MAX_BUFFER_OFFSET)
305 *len = MAX_BUFFER_OFFSET - npo->copy_off;
306
307 copy_gop = npo->copy + npo->copy_prod++;
308 copy_gop->flags = GNTCOPY_dest_gref;
309 copy_gop->len = *len;
310
311 foreign = xen_page_foreign(page);
312 if (foreign) {
313 copy_gop->source.domid = foreign->domid;
314 copy_gop->source.u.ref = foreign->gref;
315 copy_gop->flags |= GNTCOPY_source_gref;
316 } else {
317 copy_gop->source.domid = DOMID_SELF;
318 copy_gop->source.u.gmfn = gfn;
319 }
320 copy_gop->source.offset = offset;
321
322 copy_gop->dest.domid = queue->vif->domid;
323 copy_gop->dest.offset = npo->copy_off;
324 copy_gop->dest.u.ref = npo->copy_gref;
325
326 npo->copy_off += *len;
327 info->meta->size += *len;
328
329 /* Leave a gap for the GSO descriptor. */
330 if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
331 queue->rx.req_cons++;
332
333 info->head = 0; /* There must be something in this buffer now */
334}
335
336static void xenvif_gop_frag_copy_grant(unsigned long gfn,
337 unsigned offset,
338 unsigned int len,
339 void *data)
340{
341 unsigned int bytes;
342
343 while (len) {
344 bytes = len;
345 xenvif_setup_copy_gop(gfn, offset, &bytes, data);
346 offset += bytes;
347 len -= bytes;
348 }
349}
350
351/*
352 * Set up the grant operations for this fragment. If it's a flipping
353 * interface, we also set up the unmap request from here.
354 */
355static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
356 struct netrx_pending_operations *npo,
357 struct page *page, unsigned long size,
358 unsigned long offset, int *head)
359{
360 struct gop_frag_copy info = {
361 .queue = queue,
362 .npo = npo,
363 .head = *head,
364 .gso_type = XEN_NETIF_GSO_TYPE_NONE,
365 };
366 unsigned long bytes;
367
368 if (skb_is_gso(skb)) {
369 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
370 info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
371 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
372 info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
373 }
374
375 /* Data must not cross a page boundary. */
376 BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
377
378 info.meta = npo->meta + npo->meta_prod - 1;
379
380 /* Skip unused frames from start of page */
381 page += offset >> PAGE_SHIFT;
382 offset &= ~PAGE_MASK;
383
384 while (size > 0) {
385 BUG_ON(offset >= PAGE_SIZE);
386
387 bytes = PAGE_SIZE - offset;
388 if (bytes > size)
389 bytes = size;
390
391 info.page = page;
392 gnttab_foreach_grant_in_range(page, offset, bytes,
393 xenvif_gop_frag_copy_grant,
394 &info);
395 size -= bytes;
396 offset = 0;
397
398 /* Next page */
399 if (size) {
400 BUG_ON(!PageCompound(page));
401 page++;
402 }
403 }
404
405 *head = info.head;
406}
407
408/*
409 * Prepare an SKB to be transmitted to the frontend.
410 *
411 * This function is responsible for allocating grant operations, meta
412 * structures, etc.
413 *
414 * It returns the number of meta structures consumed. The number of
415 * ring slots used is always equal to the number of meta slots used
416 * plus the number of GSO descriptors used. Currently, we use either
417 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
418 * frontend-side LRO).
419 */
420static int xenvif_gop_skb(struct sk_buff *skb,
421 struct netrx_pending_operations *npo,
422 struct xenvif_queue *queue)
423{
424 struct xenvif *vif = netdev_priv(skb->dev);
425 int nr_frags = skb_shinfo(skb)->nr_frags;
426 int i;
427 struct xen_netif_rx_request *req;
428 struct xenvif_rx_meta *meta;
429 unsigned char *data;
430 int head = 1;
431 int old_meta_prod;
432 int gso_type;
433
434 old_meta_prod = npo->meta_prod;
435
436 gso_type = XEN_NETIF_GSO_TYPE_NONE;
437 if (skb_is_gso(skb)) {
438 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
439 gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
440 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
441 gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
442 }
443
444 /* Set up a GSO prefix descriptor, if necessary */
445 if ((1 << gso_type) & vif->gso_prefix_mask) {
446 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
447 meta = npo->meta + npo->meta_prod++;
448 meta->gso_type = gso_type;
449 meta->gso_size = skb_shinfo(skb)->gso_size;
450 meta->size = 0;
451 meta->id = req->id;
452 }
453
454 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
455 meta = npo->meta + npo->meta_prod++;
456
457 if ((1 << gso_type) & vif->gso_mask) {
458 meta->gso_type = gso_type;
459 meta->gso_size = skb_shinfo(skb)->gso_size;
460 } else {
461 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
462 meta->gso_size = 0;
463 }
464
465 meta->size = 0;
466 meta->id = req->id;
467 npo->copy_off = 0;
468 npo->copy_gref = req->gref;
469
470 data = skb->data;
471 while (data < skb_tail_pointer(skb)) {
472 unsigned int offset = offset_in_page(data);
473 unsigned int len = PAGE_SIZE - offset;
474
475 if (data + len > skb_tail_pointer(skb))
476 len = skb_tail_pointer(skb) - data;
477
478 xenvif_gop_frag_copy(queue, skb, npo,
479 virt_to_page(data), len, offset, &head);
480 data += len;
481 }
482
483 for (i = 0; i < nr_frags; i++) {
484 xenvif_gop_frag_copy(queue, skb, npo,
485 skb_frag_page(&skb_shinfo(skb)->frags[i]),
486 skb_frag_size(&skb_shinfo(skb)->frags[i]),
487 skb_shinfo(skb)->frags[i].page_offset,
488 &head);
489 }
490
491 return npo->meta_prod - old_meta_prod;
492}
493
494/*
495 * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
496 * used to set up the operations on the top of
497 * netrx_pending_operations, which have since been done. Check that
498 * they didn't give any errors and advance over them.
499 */
500static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
501 struct netrx_pending_operations *npo)
502{
503 struct gnttab_copy *copy_op;
504 int status = XEN_NETIF_RSP_OKAY;
505 int i;
506
507 for (i = 0; i < nr_meta_slots; i++) {
508 copy_op = npo->copy + npo->copy_cons++;
509 if (copy_op->status != GNTST_okay) {
510 netdev_dbg(vif->dev,
511 "Bad status %d from copy to DOM%d.\n",
512 copy_op->status, vif->domid);
513 status = XEN_NETIF_RSP_ERROR;
514 }
515 }
516
517 return status;
518}
519
520static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
521 struct xenvif_rx_meta *meta,
522 int nr_meta_slots)
523{
524 int i;
525 unsigned long offset;
526
527 /* No fragments used */
528 if (nr_meta_slots <= 1)
529 return;
530
531 nr_meta_slots--;
532
533 for (i = 0; i < nr_meta_slots; i++) {
534 int flags;
535 if (i == nr_meta_slots - 1)
536 flags = 0;
537 else
538 flags = XEN_NETRXF_more_data;
539
540 offset = 0;
541 make_rx_response(queue, meta[i].id, status, offset,
542 meta[i].size, flags);
543 }
544}
545
546void xenvif_kick_thread(struct xenvif_queue *queue)
547{
548 wake_up(&queue->wq);
549}
550
551static void xenvif_rx_action(struct xenvif_queue *queue)
552{
553 s8 status;
554 u16 flags;
555 struct xen_netif_rx_response *resp;
556 struct sk_buff_head rxq;
557 struct sk_buff *skb;
558 LIST_HEAD(notify);
559 int ret;
560 unsigned long offset;
561 bool need_to_notify = false;
562
563 struct netrx_pending_operations npo = {
564 .copy = queue->grant_copy_op,
565 .meta = queue->meta,
566 };
567
568 skb_queue_head_init(&rxq);
569
570 while (xenvif_rx_ring_slots_available(queue)
571 && (skb = xenvif_rx_dequeue(queue)) != NULL) {
572 queue->last_rx_time = jiffies;
573
574 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
575
576 __skb_queue_tail(&rxq, skb);
577 }
578
579 BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));
580
581 if (!npo.copy_prod)
582 goto done;
583
584 BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
585 gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
586
587 while ((skb = __skb_dequeue(&rxq)) != NULL) {
588
589 if ((1 << queue->meta[npo.meta_cons].gso_type) &
590 queue->vif->gso_prefix_mask) {
591 resp = RING_GET_RESPONSE(&queue->rx,
592 queue->rx.rsp_prod_pvt++);
593
594 resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
595
596 resp->offset = queue->meta[npo.meta_cons].gso_size;
597 resp->id = queue->meta[npo.meta_cons].id;
598 resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
599
600 npo.meta_cons++;
601 XENVIF_RX_CB(skb)->meta_slots_used--;
602 }
603
604
605 queue->stats.tx_bytes += skb->len;
606 queue->stats.tx_packets++;
607
608 status = xenvif_check_gop(queue->vif,
609 XENVIF_RX_CB(skb)->meta_slots_used,
610 &npo);
611
612 if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
613 flags = 0;
614 else
615 flags = XEN_NETRXF_more_data;
616
617 if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
618 flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
619 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
620 /* remote but checksummed. */
621 flags |= XEN_NETRXF_data_validated;
622
623 offset = 0;
624 resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
625 status, offset,
626 queue->meta[npo.meta_cons].size,
627 flags);
628
629 if ((1 << queue->meta[npo.meta_cons].gso_type) &
630 queue->vif->gso_mask) {
631 struct xen_netif_extra_info *gso =
632 (struct xen_netif_extra_info *)
633 RING_GET_RESPONSE(&queue->rx,
634 queue->rx.rsp_prod_pvt++);
635
636 resp->flags |= XEN_NETRXF_extra_info;
637
638 gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
639 gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
640 gso->u.gso.pad = 0;
641 gso->u.gso.features = 0;
642
643 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
644 gso->flags = 0;
645 }
646
647 xenvif_add_frag_responses(queue, status,
648 queue->meta + npo.meta_cons + 1,
649 XENVIF_RX_CB(skb)->meta_slots_used);
650
651 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);
652
653 need_to_notify |= !!ret;
654
655 npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
656 dev_kfree_skb(skb);
657 }
658
659done:
660 if (need_to_notify)
661 notify_remote_via_irq(queue->rx_irq);
662}
663
664void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
665{
666 int more_to_do;
667
668 RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
669
670 if (more_to_do)
671 napi_schedule(&queue->napi);
672}
673
674static void tx_add_credit(struct xenvif_queue *queue)
675{
676 unsigned long max_burst, max_credit;
677
678 /*
679 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
680 * Otherwise the interface can seize up due to insufficient credit.
681 */
682 max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
683 max_burst = min(max_burst, 131072UL);
684 max_burst = max(max_burst, queue->credit_bytes);
685
686 /* Take care that adding a new chunk of credit doesn't wrap to zero. */
687 max_credit = queue->remaining_credit + queue->credit_bytes;
688 if (max_credit < queue->remaining_credit)
689 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
690
691 queue->remaining_credit = min(max_credit, max_burst);
692}
693
694void xenvif_tx_credit_callback(unsigned long data)
695{
696 struct xenvif_queue *queue = (struct xenvif_queue *)data;
697 tx_add_credit(queue);
698 xenvif_napi_schedule_or_enable_events(queue);
699}
700
701static void xenvif_tx_err(struct xenvif_queue *queue,
702 struct xen_netif_tx_request *txp, RING_IDX end)
703{
704 RING_IDX cons = queue->tx.req_cons;
705 unsigned long flags;
706
707 do {
708 spin_lock_irqsave(&queue->response_lock, flags);
709 make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
710 push_tx_responses(queue);
711 spin_unlock_irqrestore(&queue->response_lock, flags);
712 if (cons == end)
713 break;
714 txp = RING_GET_REQUEST(&queue->tx, cons++);
715 } while (1);
716 queue->tx.req_cons = cons;
717}
718
719static void xenvif_fatal_tx_err(struct xenvif *vif)
720{
721 netdev_err(vif->dev, "fatal error; disabling device\n");
722 vif->disabled = true;
723 /* Disable the vif from queue 0's kthread */
724 if (vif->queues)
725 xenvif_kick_thread(&vif->queues[0]);
726}
727
728static int xenvif_count_requests(struct xenvif_queue *queue,
729 struct xen_netif_tx_request *first,
730 struct xen_netif_tx_request *txp,
731 int work_to_do)
732{
733 RING_IDX cons = queue->tx.req_cons;
734 int slots = 0;
735 int drop_err = 0;
736 int more_data;
737
738 if (!(first->flags & XEN_NETTXF_more_data))
739 return 0;
740
741 do {
742 struct xen_netif_tx_request dropped_tx = { 0 };
743
744 if (slots >= work_to_do) {
745 netdev_err(queue->vif->dev,
746 "Asked for %d slots but exceeds this limit\n",
747 work_to_do);
748 xenvif_fatal_tx_err(queue->vif);
749 return -ENODATA;
750 }
751
752 /* This guest is really using too many slots and
753 * considered malicious.
754 */
755 if (unlikely(slots >= fatal_skb_slots)) {
756 netdev_err(queue->vif->dev,
757 "Malicious frontend using %d slots, threshold %u\n",
758 slots, fatal_skb_slots);
759 xenvif_fatal_tx_err(queue->vif);
760 return -E2BIG;
761 }
762
763 /* Xen network protocol had implicit dependency on
764 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
765 * the historical MAX_SKB_FRAGS value 18 to honor the
766 * same behavior as before. Any packet using more than
767 * 18 slots but less than fatal_skb_slots slots is
768 * dropped
769 */
770 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
771 if (net_ratelimit())
772 netdev_dbg(queue->vif->dev,
773 "Too many slots (%d) exceeding limit (%d), dropping packet\n",
774 slots, XEN_NETBK_LEGACY_SLOTS_MAX);
775 drop_err = -E2BIG;
776 }
777
778 if (drop_err)
779 txp = &dropped_tx;
780
781 memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
782 sizeof(*txp));
783
784 /* If the guest submitted a frame >= 64 KiB then
785 * first->size overflowed and following slots will
786 * appear to be larger than the frame.
787 *
788 * This cannot be fatal error as there are buggy
789 * frontends that do this.
790 *
791 * Consume all slots and drop the packet.
792 */
793 if (!drop_err && txp->size > first->size) {
794 if (net_ratelimit())
795 netdev_dbg(queue->vif->dev,
796 "Invalid tx request, slot size %u > remaining size %u\n",
797 txp->size, first->size);
798 drop_err = -EIO;
799 }
800
801 first->size -= txp->size;
802 slots++;
803
804 if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
805 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
806 txp->offset, txp->size);
807 xenvif_fatal_tx_err(queue->vif);
808 return -EINVAL;
809 }
810
811 more_data = txp->flags & XEN_NETTXF_more_data;
812
813 if (!drop_err)
814 txp++;
815
816 } while (more_data);
817
818 if (drop_err) {
819 xenvif_tx_err(queue, first, cons + slots);
820 return drop_err;
821 }
822
823 return slots;
824}
825
826
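/* Per-skb control data kept in skb->cb while a tx packet is processed:
 * the pending ring index of the slot holding the packet header.
 */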
827struct xenvif_tx_cb {
828 u16 pending_idx;
829};
830
831#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
832
833static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
834 u16 pending_idx,
835 struct xen_netif_tx_request *txp,
836 struct gnttab_map_grant_ref *mop)
837{
838 queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
839 gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
840 GNTMAP_host_map | GNTMAP_readonly,
841 txp->gref, queue->vif->domid);
842
843 memcpy(&queue->pending_tx_info[pending_idx].req, txp,
844 sizeof(*txp));
845}
846
847static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
848{
849 struct sk_buff *skb =
850 alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
851 GFP_ATOMIC | __GFP_NOWARN);
852 if (unlikely(skb == NULL))
853 return NULL;
854
855 /* Packets passed to netif_rx() must have some headroom. */
856 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
857
858 /* Initialize it here to avoid later surprises */
859 skb_shinfo(skb)->destructor_arg = NULL;
860
861 return skb;
862}
863
864static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
865 struct sk_buff *skb,
866 struct xen_netif_tx_request *txp,
867 struct gnttab_map_grant_ref *gop,
868 unsigned int frag_overflow,
869 struct sk_buff *nskb)
870{
871 struct skb_shared_info *shinfo = skb_shinfo(skb);
872 skb_frag_t *frags = shinfo->frags;
873 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
874 int start;
875 pending_ring_idx_t index;
876 unsigned int nr_slots;
877
878 nr_slots = shinfo->nr_frags;
879
880 /* Skip first skb fragment if it is on same page as header fragment. */
881 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
882
883 for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
884 shinfo->nr_frags++, txp++, gop++) {
885 index = pending_index(queue->pending_cons++);
886 pending_idx = queue->pending_ring[index];
887 xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
888 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
889 }
890
891 if (frag_overflow) {
892
893 shinfo = skb_shinfo(nskb);
894 frags = shinfo->frags;
895
896 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
897 shinfo->nr_frags++, txp++, gop++) {
898 index = pending_index(queue->pending_cons++);
899 pending_idx = queue->pending_ring[index];
900 xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
901 frag_set_pending_idx(&frags[shinfo->nr_frags],
902 pending_idx);
903 }
904
905 skb_shinfo(skb)->frag_list = nskb;
906 }
907
908 return gop;
909}
910
911static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
912 u16 pending_idx,
913 grant_handle_t handle)
914{
915 if (unlikely(queue->grant_tx_handle[pending_idx] !=
916 NETBACK_INVALID_HANDLE)) {
917 netdev_err(queue->vif->dev,
918 "Trying to overwrite active handle! pending_idx: 0x%x\n",
919 pending_idx);
920 BUG();
921 }
922 queue->grant_tx_handle[pending_idx] = handle;
923}
924
925static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
926 u16 pending_idx)
927{
928 if (unlikely(queue->grant_tx_handle[pending_idx] ==
929 NETBACK_INVALID_HANDLE)) {
930 netdev_err(queue->vif->dev,
931 "Trying to unmap invalid handle! pending_idx: 0x%x\n",
932 pending_idx);
933 BUG();
934 }
935 queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
936}
937
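/* Check the results of the batched grant copy (header) and grant map (frag)
 * operations for one skb; on failure, unmap and release the affected slots
 * so the frontend still gets a response for every request.
 */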
938static int xenvif_tx_check_gop(struct xenvif_queue *queue,
939 struct sk_buff *skb,
940 struct gnttab_map_grant_ref **gopp_map,
941 struct gnttab_copy **gopp_copy)
942{
943 struct gnttab_map_grant_ref *gop_map = *gopp_map;
944 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
945 /* This always points to the shinfo of the skb being checked, which
946 * could be either the first or the one on the frag_list
947 */
948 struct skb_shared_info *shinfo = skb_shinfo(skb);
949 /* If this is non-NULL, we are currently checking the frag_list skb, and
950 * this points to the shinfo of the first one
951 */
952 struct skb_shared_info *first_shinfo = NULL;
953 int nr_frags = shinfo->nr_frags;
954 const bool sharedslot = nr_frags &&
955 frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
956 int i, err;
957
958 /* Check status of header. */
959 err = (*gopp_copy)->status;
960 if (unlikely(err)) {
961 if (net_ratelimit())
962 netdev_dbg(queue->vif->dev,
963 "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
964 (*gopp_copy)->status,
965 pending_idx,
966 (*gopp_copy)->source.u.ref);
967 /* The first frag might still have this slot mapped */
968 if (!sharedslot)
969 xenvif_idx_release(queue, pending_idx,
970 XEN_NETIF_RSP_ERROR);
971 }
972 (*gopp_copy)++;
973
974check_frags:
975 for (i = 0; i < nr_frags; i++, gop_map++) {
976 int j, newerr;
977
978 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
979
980 /* Check error status: if okay then remember grant handle. */
981 newerr = gop_map->status;
982
983 if (likely(!newerr)) {
984 xenvif_grant_handle_set(queue,
985 pending_idx,
986 gop_map->handle);
987 /* Had a previous error? Invalidate this fragment. */
988 if (unlikely(err)) {
989 xenvif_idx_unmap(queue, pending_idx);
990 /* If the mapping of the first frag was OK, but
991 * the header's copy failed, and they are
992 * sharing a slot, send an error
993 */
994 if (i == 0 && sharedslot)
995 xenvif_idx_release(queue, pending_idx,
996 XEN_NETIF_RSP_ERROR);
997 else
998 xenvif_idx_release(queue, pending_idx,
999 XEN_NETIF_RSP_OKAY);
1000 }
1001 continue;
1002 }
1003
1004 /* Error on this fragment: respond to client with an error. */
1005 if (net_ratelimit())
1006 netdev_dbg(queue->vif->dev,
1007 "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
1008 i,
1009 gop_map->status,
1010 pending_idx,
1011 gop_map->ref);
1012
1013 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
1014
1015 /* Not the first error? Preceding frags already invalidated. */
1016 if (err)
1017 continue;
1018
1019 /* First error: if the header hasn't shared a slot with the
1020 * first frag, release it as well.
1021 */
1022 if (!sharedslot)
1023 xenvif_idx_release(queue,
1024 XENVIF_TX_CB(skb)->pending_idx,
1025 XEN_NETIF_RSP_OKAY);
1026
1027 /* Invalidate preceding fragments of this skb. */
1028 for (j = 0; j < i; j++) {
1029 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1030 xenvif_idx_unmap(queue, pending_idx);
1031 xenvif_idx_release(queue, pending_idx,
1032 XEN_NETIF_RSP_OKAY);
1033 }
1034
1035 /* And if we found the error while checking the frag_list, unmap
1036 * the first skb's frags
1037 */
1038 if (first_shinfo) {
1039 for (j = 0; j < first_shinfo->nr_frags; j++) {
1040 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
1041 xenvif_idx_unmap(queue, pending_idx);
1042 xenvif_idx_release(queue, pending_idx,
1043 XEN_NETIF_RSP_OKAY);
1044 }
1045 }
1046
1047 /* Remember the error: invalidate all subsequent fragments. */
1048 err = newerr;
1049 }
1050
1051 if (skb_has_frag_list(skb) && !first_shinfo) {
1052 first_shinfo = skb_shinfo(skb);
1053 shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
1054 nr_frags = shinfo->nr_frags;
1055
1056 goto check_frags;
1057 }
1058
1059 *gopp_map = gop_map;
1060 return err;
1061}
1062
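/* Turn the pending indices stashed in each frag into real page/offset/size
 * entries and chain the zerocopy callback contexts so the whole chain is
 * released together.
 */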
1063static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
1064{
1065 struct skb_shared_info *shinfo = skb_shinfo(skb);
1066 int nr_frags = shinfo->nr_frags;
1067 int i;
1068 u16 prev_pending_idx = INVALID_PENDING_IDX;
1069
1070 for (i = 0; i < nr_frags; i++) {
1071 skb_frag_t *frag = shinfo->frags + i;
1072 struct xen_netif_tx_request *txp;
1073 struct page *page;
1074 u16 pending_idx;
1075
1076 pending_idx = frag_get_pending_idx(frag);
1077
1078 /* If this is not the first frag, chain it to the previous*/
1079 if (prev_pending_idx == INVALID_PENDING_IDX)
1080 skb_shinfo(skb)->destructor_arg =
1081 &callback_param(queue, pending_idx);
1082 else
1083 callback_param(queue, prev_pending_idx).ctx =
1084 &callback_param(queue, pending_idx);
1085
1086 callback_param(queue, pending_idx).ctx = NULL;
1087 prev_pending_idx = pending_idx;
1088
1089 txp = &queue->pending_tx_info[pending_idx].req;
1090 page = virt_to_page(idx_to_kaddr(queue, pending_idx));
1091 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
1092 skb->len += txp->size;
1093 skb->data_len += txp->size;
1094 skb->truesize += txp->size;
1095
1096 /* Take an extra reference to offset network stack's put_page */
1097 get_page(queue->mmap_pages[pending_idx]);
1098 }
1099}
1100
1101static int xenvif_get_extras(struct xenvif_queue *queue,
1102 struct xen_netif_extra_info *extras,
1103 int work_to_do)
1104{
1105 struct xen_netif_extra_info extra;
1106 RING_IDX cons = queue->tx.req_cons;
1107
1108 do {
1109 if (unlikely(work_to_do-- <= 0)) {
1110 netdev_err(queue->vif->dev, "Missing extra info\n");
1111 xenvif_fatal_tx_err(queue->vif);
1112 return -EBADR;
1113 }
1114
1115 memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
1116 sizeof(extra));
1117 if (unlikely(!extra.type ||
1118 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1119 queue->tx.req_cons = ++cons;
1120 netdev_err(queue->vif->dev,
1121 "Invalid extra type: %d\n", extra.type);
1122 xenvif_fatal_tx_err(queue->vif);
1123 return -EINVAL;
1124 }
1125
1126 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1127 queue->tx.req_cons = ++cons;
1128 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1129
1130 return work_to_do;
1131}
1132
1133static int xenvif_set_skb_gso(struct xenvif *vif,
1134 struct sk_buff *skb,
1135 struct xen_netif_extra_info *gso)
1136{
1137 if (!gso->u.gso.size) {
1138 netdev_err(vif->dev, "GSO size must not be zero.\n");
1139 xenvif_fatal_tx_err(vif);
1140 return -EINVAL;
1141 }
1142
1143 switch (gso->u.gso.type) {
1144 case XEN_NETIF_GSO_TYPE_TCPV4:
1145 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1146 break;
1147 case XEN_NETIF_GSO_TYPE_TCPV6:
1148 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1149 break;
1150 default:
1151 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1152 xenvif_fatal_tx_err(vif);
1153 return -EINVAL;
1154 }
1155
1156 skb_shinfo(skb)->gso_size = gso->u.gso.size;
1157 /* gso_segs will be calculated later */
1158
1159 return 0;
1160}
1161
1162static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
1163{
1164 bool recalculate_partial_csum = false;
1165
1166 /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1167 * peers can fail to set NETRXF_csum_blank when sending a GSO
1168 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1169 * recalculate the partial checksum.
1170 */
1171 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1172 queue->stats.rx_gso_checksum_fixup++;
1173 skb->ip_summed = CHECKSUM_PARTIAL;
1174 recalculate_partial_csum = true;
1175 }
1176
1177 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1178 if (skb->ip_summed != CHECKSUM_PARTIAL)
1179 return 0;
1180
1181 return skb_checksum_setup(skb, recalculate_partial_csum);
1182}
1183
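/* Credit-based tx scheduling: a queue may send credit_bytes per credit_usec
 * window. A request larger than the remaining credit arms credit_timeout,
 * which tops the credit back up and re-schedules NAPI processing.
 */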
1184static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
1185{
1186 u64 now = get_jiffies_64();
1187 u64 next_credit = queue->credit_window_start +
1188 msecs_to_jiffies(queue->credit_usec / 1000);
1189
1190 /* Timer could already be pending in rare cases. */
1191 if (timer_pending(&queue->credit_timeout))
1192 return true;
1193
1194 /* Passed the point where we can replenish credit? */
1195 if (time_after_eq64(now, next_credit)) {
1196 queue->credit_window_start = now;
1197 tx_add_credit(queue);
1198 }
1199
1200 /* Still too big to send right now? Set a callback. */
1201 if (size > queue->remaining_credit) {
1202 queue->credit_timeout.data =
1203 (unsigned long)queue;
1204 mod_timer(&queue->credit_timeout,
1205 next_credit);
1206 queue->credit_window_start = next_credit;
1207
1208 return true;
1209 }
1210
1211 return false;
1212}
1213
1214/* No locking is required in xenvif_mcast_add/del() as they are
1215 * only ever invoked from NAPI poll. An RCU list is used because
1216 * xenvif_mcast_match() is called asynchronously, during start_xmit.
1217 */
1218
1219static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
1220{
1221 struct xenvif_mcast_addr *mcast;
1222
1223 if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
1224 if (net_ratelimit())
1225 netdev_err(vif->dev,
1226 "Too many multicast addresses\n");
1227 return -ENOSPC;
1228 }
1229
1230 mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
1231 if (!mcast)
1232 return -ENOMEM;
1233
1234 ether_addr_copy(mcast->addr, addr);
1235 list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
1236 vif->fe_mcast_count++;
1237
1238 return 0;
1239}
1240
1241static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
1242{
1243 struct xenvif_mcast_addr *mcast;
1244
1245 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
1246 if (ether_addr_equal(addr, mcast->addr)) {
1247 --vif->fe_mcast_count;
1248 list_del_rcu(&mcast->entry);
1249 kfree_rcu(mcast, rcu);
1250 break;
1251 }
1252 }
1253}
1254
1255bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
1256{
1257 struct xenvif_mcast_addr *mcast;
1258
1259 rcu_read_lock();
1260 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
1261 if (ether_addr_equal(addr, mcast->addr)) {
1262 rcu_read_unlock();
1263 return true;
1264 }
1265 }
1266 rcu_read_unlock();
1267
1268 return false;
1269}
1270
1271void xenvif_mcast_addr_list_free(struct xenvif *vif)
1272{
1273 /* No need for locking or RCU here. NAPI poll and TX queue
1274 * are stopped.
1275 */
1276 while (!list_empty(&vif->fe_mcast_addr)) {
1277 struct xenvif_mcast_addr *mcast;
1278
1279 mcast = list_first_entry(&vif->fe_mcast_addr,
1280 struct xenvif_mcast_addr,
1281 entry);
1282 --vif->fe_mcast_count;
1283 list_del(&mcast->entry);
1284 kfree(mcast);
1285 }
1286}
1287
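/* Pull requests off the tx ring, validate them, and queue grant copy ops
 * (for the linear header area) and grant map ops (for the frags); the
 * batches are executed later from xenvif_tx_action().
 */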
1288static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1289 int budget,
1290 unsigned *copy_ops,
1291 unsigned *map_ops)
1292{
1293 struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
1294 struct sk_buff *skb, *nskb;
1295 int ret;
1296 unsigned int frag_overflow;
1297
1298 while (skb_queue_len(&queue->tx_queue) < budget) {
1299 struct xen_netif_tx_request txreq;
1300 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
1301 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1302 u16 pending_idx;
1303 RING_IDX idx;
1304 int work_to_do;
1305 unsigned int data_len;
1306 pending_ring_idx_t index;
1307
1308 if (queue->tx.sring->req_prod - queue->tx.req_cons >
1309 XEN_NETIF_TX_RING_SIZE) {
1310 netdev_err(queue->vif->dev,
1311 "Impossible number of requests. "
1312 "req_prod %d, req_cons %d, size %ld\n",
1313 queue->tx.sring->req_prod, queue->tx.req_cons,
1314 XEN_NETIF_TX_RING_SIZE);
1315 xenvif_fatal_tx_err(queue->vif);
1316 break;
1317 }
1318
1319 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
1320 if (!work_to_do)
1321 break;
1322
1323 idx = queue->tx.req_cons;
1324 rmb(); /* Ensure that we see the request before we copy it. */
1325 memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));
1326
1327 /* Credit-based scheduling. */
1328 if (txreq.size > queue->remaining_credit &&
1329 tx_credit_exceeded(queue, txreq.size))
1330 break;
1331
1332 queue->remaining_credit -= txreq.size;
1333
1334 work_to_do--;
1335 queue->tx.req_cons = ++idx;
1336
1337 memset(extras, 0, sizeof(extras));
1338 if (txreq.flags & XEN_NETTXF_extra_info) {
1339 work_to_do = xenvif_get_extras(queue, extras,
1340 work_to_do);
1341 idx = queue->tx.req_cons;
1342 if (unlikely(work_to_do < 0))
1343 break;
1344 }
1345
1346 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
1347 struct xen_netif_extra_info *extra;
1348
1349 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
1350 ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
1351
1352 make_tx_response(queue, &txreq,
1353 (ret == 0) ?
1354 XEN_NETIF_RSP_OKAY :
1355 XEN_NETIF_RSP_ERROR);
1356 push_tx_responses(queue);
1357 continue;
1358 }
1359
1360 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
1361 struct xen_netif_extra_info *extra;
1362
1363 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
1364 xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
1365
1366 make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
1367 push_tx_responses(queue);
1368 continue;
1369 }
1370
1371 ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
1372 if (unlikely(ret < 0))
1373 break;
1374
1375 idx += ret;
1376
1377 if (unlikely(txreq.size < ETH_HLEN)) {
1378 netdev_dbg(queue->vif->dev,
1379 "Bad packet size: %d\n", txreq.size);
1380 xenvif_tx_err(queue, &txreq, idx);
1381 break;
1382 }
1383
1384 /* No crossing a page as the payload mustn't fragment. */
1385 if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
1386 netdev_err(queue->vif->dev,
1387 "txreq.offset: %u, size: %u, end: %lu\n",
1388 txreq.offset, txreq.size,
1389 (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
1390 xenvif_fatal_tx_err(queue->vif);
1391 break;
1392 }
1393
1394 index = pending_index(queue->pending_cons);
1395 pending_idx = queue->pending_ring[index];
1396
1397 data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
1398 ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
1399 XEN_NETBACK_TX_COPY_LEN : txreq.size;
1400
1401 skb = xenvif_alloc_skb(data_len);
1402 if (unlikely(skb == NULL)) {
1403 netdev_dbg(queue->vif->dev,
1404 "Can't allocate a skb in start_xmit.\n");
1405 xenvif_tx_err(queue, &txreq, idx);
1406 break;
1407 }
1408
1409 skb_shinfo(skb)->nr_frags = ret;
1410 if (data_len < txreq.size)
1411 skb_shinfo(skb)->nr_frags++;
1412 /* At this point shinfo->nr_frags is in fact the number of
1413 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
1414 */
1415 frag_overflow = 0;
1416 nskb = NULL;
1417 if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
1418 frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
1419 BUG_ON(frag_overflow > MAX_SKB_FRAGS);
1420 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
1421 nskb = xenvif_alloc_skb(0);
1422 if (unlikely(nskb == NULL)) {
1423 kfree_skb(skb);
1424 xenvif_tx_err(queue, &txreq, idx);
1425 if (net_ratelimit())
1426 netdev_err(queue->vif->dev,
1427 "Can't allocate the frag_list skb.\n");
1428 break;
1429 }
1430 }
1431
1432 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1433 struct xen_netif_extra_info *gso;
1434 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1435
1436 if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1437 /* Failure in xenvif_set_skb_gso is fatal. */
1438 kfree_skb(skb);
1439 kfree_skb(nskb);
1440 break;
1441 }
1442 }
1443
1444 XENVIF_TX_CB(skb)->pending_idx = pending_idx;
1445
1446 __skb_put(skb, data_len);
1447 queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
1448 queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
1449 queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
1450
1451 queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
1452 virt_to_gfn(skb->data);
1453 queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
1454 queue->tx_copy_ops[*copy_ops].dest.offset =
1455 offset_in_page(skb->data) & ~XEN_PAGE_MASK;
1456
1457 queue->tx_copy_ops[*copy_ops].len = data_len;
1458 queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
1459
1460 (*copy_ops)++;
1461
1462 if (data_len < txreq.size) {
1463 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1464 pending_idx);
1465 xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
1466 gop++;
1467 } else {
1468 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1469 INVALID_PENDING_IDX);
1470 memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
1471 sizeof(txreq));
1472 }
1473
1474 queue->pending_cons++;
1475
1476 gop = xenvif_get_requests(queue, skb, txfrags, gop,
1477 frag_overflow, nskb);
1478
1479 __skb_queue_tail(&queue->tx_queue, skb);
1480
1481 queue->tx.req_cons = idx;
1482
1483 if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
1484 (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
1485 break;
1486 }
1487
1488 (*map_ops) = gop - queue->tx_map_ops;
1489 return;
1490}
1491
1492/* Consolidate skb with a frag_list into a brand new one with local pages on
1493 * frags. Returns 0 or -ENOMEM if can't allocate new pages.
1494 */
1495static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1496{
1497 unsigned int offset = skb_headlen(skb);
1498 skb_frag_t frags[MAX_SKB_FRAGS];
1499 int i, f;
1500 struct ubuf_info *uarg;
1501 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1502
1503 queue->stats.tx_zerocopy_sent += 2;
1504 queue->stats.tx_frag_overflow++;
1505
1506 xenvif_fill_frags(queue, nskb);
1507 /* Subtract frags size, we will correct it later */
1508 skb->truesize -= skb->data_len;
1509 skb->len += nskb->len;
1510 skb->data_len += nskb->len;
1511
1512 /* create a brand new frags array and coalesce there */
1513 for (i = 0; offset < skb->len; i++) {
1514 struct page *page;
1515 unsigned int len;
1516
1517 BUG_ON(i >= MAX_SKB_FRAGS);
1518 page = alloc_page(GFP_ATOMIC);
1519 if (!page) {
1520 int j;
1521 skb->truesize += skb->data_len;
1522 for (j = 0; j < i; j++)
1523 put_page(frags[j].page.p);
1524 return -ENOMEM;
1525 }
1526
1527 if (offset + PAGE_SIZE < skb->len)
1528 len = PAGE_SIZE;
1529 else
1530 len = skb->len - offset;
1531 if (skb_copy_bits(skb, offset, page_address(page), len))
1532 BUG();
1533
1534 offset += len;
1535 frags[i].page.p = page;
1536 frags[i].page_offset = 0;
1537 skb_frag_size_set(&frags[i], len);
1538 }
1539
1540 /* Copied all the bits from the frag list -- free it. */
1541 skb_frag_list_init(skb);
1542 xenvif_skb_zerocopy_prepare(queue, nskb);
1543 kfree_skb(nskb);
1544
1545 /* Release all the original (foreign) frags. */
1546 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1547 skb_frag_unref(skb, f);
1548 uarg = skb_shinfo(skb)->destructor_arg;
1549 /* increase inflight counter to offset decrement in callback */
1550 atomic_inc(&queue->inflight_packets);
1551 uarg->callback(uarg, true);
1552 skb_shinfo(skb)->destructor_arg = NULL;
1553
1554 /* Fill the skb with the new (local) frags. */
1555 memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
1556 skb_shinfo(skb)->nr_frags = i;
1557 skb->truesize += i * PAGE_SIZE;
1558
1559 return 0;
1560}
1561
1562static int xenvif_tx_submit(struct xenvif_queue *queue)
1563{
1564 struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1565 struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1566 struct sk_buff *skb;
1567 int work_done = 0;
1568
1569 while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1570 struct xen_netif_tx_request *txp;
1571 u16 pending_idx;
1572 unsigned data_len;
1573
1574 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1575 txp = &queue->pending_tx_info[pending_idx].req;
1576
1577 /* Check the remap error code. */
1578 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1579 /* If there was an error, xenvif_tx_check_gop is
1580 * expected to release all the frags which were mapped,
1581 * so kfree_skb shouldn't do it again
1582 */
1583 skb_shinfo(skb)->nr_frags = 0;
1584 if (skb_has_frag_list(skb)) {
1585 struct sk_buff *nskb =
1586 skb_shinfo(skb)->frag_list;
1587 skb_shinfo(nskb)->nr_frags = 0;
1588 }
1589 kfree_skb(skb);
1590 continue;
1591 }
1592
1593 data_len = skb->len;
1594 callback_param(queue, pending_idx).ctx = NULL;
1595 if (data_len < txp->size) {
1596 /* Append the packet payload as a fragment. */
1597 txp->offset += data_len;
1598 txp->size -= data_len;
1599 } else {
1600 /* Schedule a response immediately. */
1601 xenvif_idx_release(queue, pending_idx,
1602 XEN_NETIF_RSP_OKAY);
1603 }
1604
1605 if (txp->flags & XEN_NETTXF_csum_blank)
1606 skb->ip_summed = CHECKSUM_PARTIAL;
1607 else if (txp->flags & XEN_NETTXF_data_validated)
1608 skb->ip_summed = CHECKSUM_UNNECESSARY;
1609
1610 xenvif_fill_frags(queue, skb);
1611
1612 if (unlikely(skb_has_frag_list(skb))) {
1613 if (xenvif_handle_frag_list(queue, skb)) {
1614 if (net_ratelimit())
1615 netdev_err(queue->vif->dev,
1616 "Not enough memory to consolidate frag_list!\n");
1617 xenvif_skb_zerocopy_prepare(queue, skb);
1618 kfree_skb(skb);
1619 continue;
1620 }
1621 }
1622
1623 skb->dev = queue->vif->dev;
1624 skb->protocol = eth_type_trans(skb, skb->dev);
1625 skb_reset_network_header(skb);
1626
1627 if (checksum_setup(queue, skb)) {
1628 netdev_dbg(queue->vif->dev,
1629 "Can't setup checksum in net_tx_action\n");
1630 /* We have to set this flag to trigger the callback */
1631 if (skb_shinfo(skb)->destructor_arg)
1632 xenvif_skb_zerocopy_prepare(queue, skb);
1633 kfree_skb(skb);
1634 continue;
1635 }
1636
1637 skb_probe_transport_header(skb, 0);
1638
1639 /* If the packet is GSO then we will have just set up the
1640 * transport header offset in checksum_setup so it's now
1641 * straightforward to calculate gso_segs.
1642 */
1643 if (skb_is_gso(skb)) {
1644 int mss = skb_shinfo(skb)->gso_size;
1645 int hdrlen = skb_transport_header(skb) -
1646 skb_mac_header(skb) +
1647 tcp_hdrlen(skb);
1648
1649 skb_shinfo(skb)->gso_segs =
1650 DIV_ROUND_UP(skb->len - hdrlen, mss);
1651 }
1652
1653 queue->stats.rx_bytes += skb->len;
1654 queue->stats.rx_packets++;
1655
1656 work_done++;
1657
1658 /* Set this flag right before netif_receive_skb, otherwise
1659 * someone might think this packet already left netback, and
1660 * do a skb_copy_ubufs while we are still in control of the
1661 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
1662 */
1663 if (skb_shinfo(skb)->destructor_arg) {
1664 xenvif_skb_zerocopy_prepare(queue, skb);
1665 queue->stats.tx_zerocopy_sent++;
1666 }
1667
1668 netif_receive_skb(skb);
1669 }
1670
1671 return work_done;
1672}
1673
1674void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1675{
1676 unsigned long flags;
1677 pending_ring_idx_t index;
1678 struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1679
1680 /* This is the only place where we grab this lock, to protect callbacks
1681 * from each other.
1682 */
1683 spin_lock_irqsave(&queue->callback_lock, flags);
1684 do {
1685 u16 pending_idx = ubuf->desc;
1686 ubuf = (struct ubuf_info *) ubuf->ctx;
1687 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1688 MAX_PENDING_REQS);
1689 index = pending_index(queue->dealloc_prod);
1690 queue->dealloc_ring[index] = pending_idx;
f53c3fe8
ZK
1691 /* Sync with xenvif_tx_dealloc_action:
1692 * insert idx then incr producer.
1693 */
1694 smp_wmb();
e9ce7cb6 1695 queue->dealloc_prod++;
f53c3fe8 1696 } while (ubuf);
e9ce7cb6 1697 spin_unlock_irqrestore(&queue->callback_lock, flags);
f53c3fe8 1698
1bb332af 1699 if (likely(zerocopy_success))
e9ce7cb6 1700 queue->stats.tx_zerocopy_success++;
1bb332af 1701 else
e9ce7cb6 1702 queue->stats.tx_zerocopy_fail++;
a64bd934 1703 xenvif_skb_zerocopy_complete(queue);
f53c3fe8
ZK
1704}
1705
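/* Consumer side of the dealloc ring filled by xenvif_zerocopy_callback().
 * The smp_rmb() below pairs with the smp_wmb() in the callback so that
 * ring entries are only read after the producer index that published
 * them.  Completed grants are unmapped in one batch and their pending
 * slots released.
 */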
e9ce7cb6 1706static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
f53c3fe8
ZK
1707{
1708 struct gnttab_unmap_grant_ref *gop;
1709 pending_ring_idx_t dc, dp;
1710 u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1711 unsigned int i = 0;
1712
e9ce7cb6
WL
1713 dc = queue->dealloc_cons;
1714 gop = queue->tx_unmap_ops;
f53c3fe8
ZK
1715
1716 /* Free up any grants we have finished using */
1717 do {
e9ce7cb6 1718 dp = queue->dealloc_prod;
f53c3fe8
ZK
1719
 1720 /* Ensure we see all indices enqueued by every
 1721 * xenvif_zerocopy_callback() invocation.
1722 */
1723 smp_rmb();
1724
1725 while (dc != dp) {
50c2e4dd 1726 BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
f53c3fe8 1727 pending_idx =
e9ce7cb6 1728 queue->dealloc_ring[pending_index(dc++)];
f53c3fe8 1729
50c2e4dd 1730 pending_idx_release[gop - queue->tx_unmap_ops] =
f53c3fe8 1731 pending_idx;
50c2e4dd 1732 queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
e9ce7cb6 1733 queue->mmap_pages[pending_idx];
f53c3fe8 1734 gnttab_set_unmap_op(gop,
e9ce7cb6 1735 idx_to_kaddr(queue, pending_idx),
f53c3fe8 1736 GNTMAP_host_map,
e9ce7cb6
WL
1737 queue->grant_tx_handle[pending_idx]);
1738 xenvif_grant_handle_reset(queue, pending_idx);
f53c3fe8
ZK
1739 ++gop;
1740 }
1741
e9ce7cb6 1742 } while (dp != queue->dealloc_prod);
f53c3fe8 1743
e9ce7cb6 1744 queue->dealloc_cons = dc;
f53c3fe8 1745
e9ce7cb6 1746 if (gop - queue->tx_unmap_ops > 0) {
f53c3fe8 1747 int ret;
e9ce7cb6 1748 ret = gnttab_unmap_refs(queue->tx_unmap_ops,
f53c3fe8 1749 NULL,
e9ce7cb6
WL
1750 queue->pages_to_unmap,
1751 gop - queue->tx_unmap_ops);
f53c3fe8 1752 if (ret) {
68946159 1753 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
e9ce7cb6
WL
1754 gop - queue->tx_unmap_ops, ret);
1755 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
f53c3fe8 1756 if (gop[i].status != GNTST_okay)
e9ce7cb6 1757 netdev_err(queue->vif->dev,
68946159 1758 " host_addr: 0x%llx handle: 0x%x status: %d\n",
f53c3fe8
ZK
1759 gop[i].host_addr,
1760 gop[i].handle,
1761 gop[i].status);
1762 }
1763 BUG();
1764 }
1765 }
1766
e9ce7cb6
WL
1767 for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1768 xenvif_idx_release(queue, pending_idx_release[i],
f53c3fe8 1769 XEN_NETIF_RSP_OKAY);
3e2234b3
ZK
1770}
1771
f53c3fe8 1772
f942dc25 1773/* Called after netfront has transmitted */
e9ce7cb6 1774int xenvif_tx_action(struct xenvif_queue *queue, int budget)
f942dc25 1775{
bdab8275 1776 unsigned nr_mops, nr_cops = 0;
f53c3fe8 1777 int work_done, ret;
f942dc25 1778
e9ce7cb6 1779 if (unlikely(!tx_work_todo(queue)))
b3f980bd
WL
1780 return 0;
1781
e9ce7cb6 1782 xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
f942dc25 1783
bdab8275 1784 if (nr_cops == 0)
b3f980bd
WL
1785 return 0;
1786
e9ce7cb6 1787 gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
bdab8275 1788 if (nr_mops != 0) {
e9ce7cb6 1789 ret = gnttab_map_refs(queue->tx_map_ops,
bdab8275 1790 NULL,
e9ce7cb6 1791 queue->pages_to_map,
bdab8275
ZK
1792 nr_mops);
1793 BUG_ON(ret);
1794 }
f942dc25 1795
e9ce7cb6 1796 work_done = xenvif_tx_submit(queue);
f942dc25 1797
b3f980bd 1798 return work_done;
f942dc25
IC
1799}
1800
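/* Complete a pending Tx slot: queue the Tx response, return the slot to
 * the pending ring and push the responses to the frontend.
 */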
e9ce7cb6 1801static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
7376419a 1802 u8 status)
f942dc25 1803{
f942dc25 1804 struct pending_tx_info *pending_tx_info;
f53c3fe8 1805 pending_ring_idx_t index;
f53c3fe8 1806 unsigned long flags;
2810e5b9 1807
e9ce7cb6 1808 pending_tx_info = &queue->pending_tx_info[pending_idx];
7fbb9d84 1809
e9ce7cb6 1810 spin_lock_irqsave(&queue->response_lock, flags);
7fbb9d84 1811
e9ce7cb6 1812 make_tx_response(queue, &pending_tx_info->req, status);
7fbb9d84
DV
1813
 1814 /* Release the pending index before pushing the Tx response so
 1815 * it's available before a new Tx request is pushed by the
1816 * frontend.
1817 */
1818 index = pending_index(queue->pending_prod++);
e9ce7cb6 1819 queue->pending_ring[index] = pending_idx;
7fbb9d84 1820
c8a4d299 1821 push_tx_responses(queue);
7fbb9d84 1822
e9ce7cb6 1823 spin_unlock_irqrestore(&queue->response_lock, flags);
f942dc25
IC
1824}
1825
2810e5b9 1826
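/* Write a Tx response at the private producer index.  Requests carrying
 * extra_info consume an extra slot, which is filled with a null response.
 * Nothing is visible to the frontend until push_tx_responses() publishes
 * rsp_prod and, if necessary, notifies the event channel.
 */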
e9ce7cb6 1827static void make_tx_response(struct xenvif_queue *queue,
f942dc25
IC
1828 struct xen_netif_tx_request *txp,
1829 s8 st)
1830{
e9ce7cb6 1831 RING_IDX i = queue->tx.rsp_prod_pvt;
f942dc25 1832 struct xen_netif_tx_response *resp;
f942dc25 1833
e9ce7cb6 1834 resp = RING_GET_RESPONSE(&queue->tx, i);
f942dc25
IC
1835 resp->id = txp->id;
1836 resp->status = st;
1837
1838 if (txp->flags & XEN_NETTXF_extra_info)
e9ce7cb6 1839 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
f942dc25 1840
e9ce7cb6 1841 queue->tx.rsp_prod_pvt = ++i;
f942dc25
IC
1842}
1843
c8a4d299
DV
1844static void push_tx_responses(struct xenvif_queue *queue)
1845{
1846 int notify;
1847
1848 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1849 if (notify)
1850 notify_remote_via_irq(queue->tx_irq);
1851}
1852
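/* Fill in an Rx response slot.  A negative status overrides the size so
 * the frontend sees the error code instead of a byte count.
 */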
e9ce7cb6 1853static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
f942dc25
IC
1854 u16 id,
1855 s8 st,
1856 u16 offset,
1857 u16 size,
1858 u16 flags)
1859{
e9ce7cb6 1860 RING_IDX i = queue->rx.rsp_prod_pvt;
f942dc25
IC
1861 struct xen_netif_rx_response *resp;
1862
e9ce7cb6 1863 resp = RING_GET_RESPONSE(&queue->rx, i);
f942dc25
IC
1864 resp->offset = offset;
1865 resp->flags = flags;
1866 resp->id = id;
1867 resp->status = (s16)size;
1868 if (st < 0)
1869 resp->status = (s16)st;
1870
e9ce7cb6 1871 queue->rx.rsp_prod_pvt = ++i;
f942dc25
IC
1872
1873 return resp;
1874}
1875
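/* Immediately unmap a single granted Tx page and reset its grant handle.
 * Unlike the batched dealloc path this does not defer the unmap; any
 * failure is treated as fatal.
 */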
e9ce7cb6 1876void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
f53c3fe8
ZK
1877{
1878 int ret;
1879 struct gnttab_unmap_grant_ref tx_unmap_op;
1880
1881 gnttab_set_unmap_op(&tx_unmap_op,
e9ce7cb6 1882 idx_to_kaddr(queue, pending_idx),
f53c3fe8 1883 GNTMAP_host_map,
e9ce7cb6
WL
1884 queue->grant_tx_handle[pending_idx]);
1885 xenvif_grant_handle_reset(queue, pending_idx);
f53c3fe8
ZK
1886
1887 ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
e9ce7cb6 1888 &queue->mmap_pages[pending_idx], 1);
7aceb47a 1889 if (ret) {
e9ce7cb6 1890 netdev_err(queue->vif->dev,
68946159 1891 "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
7aceb47a
ZK
1892 ret,
1893 pending_idx,
1894 tx_unmap_op.host_addr,
1895 tx_unmap_op.handle,
1896 tx_unmap_op.status);
1897 BUG();
1898 }
f53c3fe8
ZK
1899}
1900
e9ce7cb6 1901static inline int tx_work_todo(struct xenvif_queue *queue)
f942dc25 1902{
e9ce7cb6 1903 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
f942dc25
IC
1904 return 1;
1905
1906 return 0;
1907}
1908
e9ce7cb6 1909static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
f53c3fe8 1910{
e9ce7cb6 1911 return queue->dealloc_cons != queue->dealloc_prod;
f53c3fe8
ZK
1912}
1913
e9ce7cb6 1914void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
f942dc25 1915{
e9ce7cb6
WL
1916 if (queue->tx.sring)
1917 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1918 queue->tx.sring);
1919 if (queue->rx.sring)
1920 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1921 queue->rx.sring);
f942dc25
IC
1922}
1923
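/* Map the Tx and Rx shared rings granted by the frontend into backend
 * virtual address space and initialise the back ring state.  On failure
 * any ring that was already mapped is torn down again.
 */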
e9ce7cb6 1924int xenvif_map_frontend_rings(struct xenvif_queue *queue,
7376419a
WL
1925 grant_ref_t tx_ring_ref,
1926 grant_ref_t rx_ring_ref)
f942dc25 1927{
c9d63699 1928 void *addr;
f942dc25
IC
1929 struct xen_netif_tx_sring *txs;
1930 struct xen_netif_rx_sring *rxs;
1931
1932 int err = -ENOMEM;
1933
e9ce7cb6 1934 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
ccc9d90a 1935 &tx_ring_ref, 1, &addr);
c9d63699 1936 if (err)
f942dc25
IC
1937 goto err;
1938
c9d63699 1939 txs = (struct xen_netif_tx_sring *)addr;
d0089e8a 1940 BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
f942dc25 1941
e9ce7cb6 1942 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
ccc9d90a 1943 &rx_ring_ref, 1, &addr);
c9d63699 1944 if (err)
f942dc25 1945 goto err;
f942dc25 1946
c9d63699 1947 rxs = (struct xen_netif_rx_sring *)addr;
d0089e8a 1948 BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
f942dc25
IC
1949
1950 return 0;
1951
1952err:
e9ce7cb6 1953 xenvif_unmap_frontend_rings(queue);
f942dc25
IC
1954 return err;
1955}
1956
ecf08d2d 1957static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
ca2f09f2 1958{
ecf08d2d
DV
1959 struct xenvif *vif = queue->vif;
1960
1961 queue->stalled = true;
1962
1963 /* At least one queue has stalled? Disable the carrier. */
1964 spin_lock(&vif->lock);
1965 if (vif->stalled_queues++ == 0) {
1966 netdev_info(vif->dev, "Guest Rx stalled");
1967 netif_carrier_off(vif->dev);
1968 }
1969 spin_unlock(&vif->lock);
ca2f09f2
PD
1970}
1971
ecf08d2d 1972static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
f34a4cf9 1973{
ecf08d2d 1974 struct xenvif *vif = queue->vif;
f34a4cf9 1975
ecf08d2d
DV
1976 queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
1977 queue->stalled = false;
f34a4cf9 1978
ecf08d2d
DV
1979 /* All queues are ready? Enable the carrier. */
1980 spin_lock(&vif->lock);
1981 if (--vif->stalled_queues == 0) {
1982 netdev_info(vif->dev, "Guest Rx ready");
1983 netif_carrier_on(vif->dev);
1984 }
1985 spin_unlock(&vif->lock);
1986}
f34a4cf9 1987
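/* A queue counts as stalled when the frontend has left it without any
 * unconsumed Rx requests for longer than the vif's stall timeout; it is
 * ready again as soon as at least one request becomes available.
 */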
ecf08d2d
DV
1988static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
1989{
1990 RING_IDX prod, cons;
1991
1992 prod = queue->rx.sring->req_prod;
1993 cons = queue->rx.req_cons;
1994
1d5d4852 1995 return !queue->stalled && prod - cons < 1
ecf08d2d 1996 && time_after(jiffies,
26c0e102 1997 queue->last_rx_time + queue->vif->stall_timeout);
ecf08d2d
DV
1998}
1999
2000static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
2001{
2002 RING_IDX prod, cons;
2003
2004 prod = queue->rx.sring->req_prod;
2005 cons = queue->rx.req_cons;
2006
1d5d4852 2007 return queue->stalled && prod - cons >= 1;
ecf08d2d
DV
2008}
2009
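/* The guest Rx kthread has work if there are queued packets and ring
 * slots to place them in, a stall state change to handle, a stop
 * request, or a disabled vif to act upon.
 */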
f48da8b1 2010static bool xenvif_have_rx_work(struct xenvif_queue *queue)
ca2f09f2 2011{
f48da8b1 2012 return (!skb_queue_empty(&queue->rx_queue)
1d5d4852 2013 && xenvif_rx_ring_slots_available(queue))
26c0e102
DV
2014 || (queue->vif->stall_timeout &&
2015 (xenvif_rx_queue_stalled(queue)
2016 || xenvif_rx_queue_ready(queue)))
f48da8b1
DV
2017 || kthread_should_stop()
2018 || queue->vif->disabled;
ca2f09f2
PD
2019}
2020
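/* Time, in jiffies, until the packet at the head of the guest Rx queue
 * expires.  An empty queue has nothing to expire, so the caller may
 * sleep indefinitely.
 */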
f48da8b1 2021static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
f34a4cf9 2022{
f48da8b1
DV
2023 struct sk_buff *skb;
2024 long timeout;
f34a4cf9 2025
f48da8b1
DV
2026 skb = skb_peek(&queue->rx_queue);
2027 if (!skb)
2028 return MAX_SCHEDULE_TIMEOUT;
f34a4cf9 2029
f48da8b1
DV
2030 timeout = XENVIF_RX_CB(skb)->expires - jiffies;
2031 return timeout < 0 ? 0 : timeout;
2032}
f34a4cf9 2033
f48da8b1
DV
2034/* Wait until the guest Rx thread has work.
2035 *
2036 * The timeout needs to be adjusted based on the current head of the
2037 * queue (and not just the head at the beginning). In particular, if
2038 * the queue is initially empty an infinite timeout is used and this
2039 * needs to be reduced when a skb is queued.
2040 *
2041 * This cannot be done with wait_event_timeout() because it only
2042 * calculates the timeout once.
2043 */
2044static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
2045{
2046 DEFINE_WAIT(wait);
2047
2048 if (xenvif_have_rx_work(queue))
2049 return;
2050
2051 for (;;) {
2052 long ret;
2053
2054 prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
2055 if (xenvif_have_rx_work(queue))
2056 break;
2057 ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
2058 if (!ret)
2059 break;
f34a4cf9 2060 }
f48da8b1 2061 finish_wait(&queue->wq, &wait);
f34a4cf9
ZK
2062}
2063
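/* Per-queue guest Rx kthread: pushes queued packets to the frontend,
 * adjusts the carrier state according to frontend responsiveness and
 * drops packets that have sat on the internal queue for too long.
 */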
121fa4b7 2064int xenvif_kthread_guest_rx(void *data)
b3f980bd 2065{
e9ce7cb6 2066 struct xenvif_queue *queue = data;
f48da8b1 2067 struct xenvif *vif = queue->vif;
b3f980bd 2068
26c0e102
DV
2069 if (!vif->stall_timeout)
2070 xenvif_queue_carrier_on(queue);
2071
f48da8b1
DV
2072 for (;;) {
2073 xenvif_wait_for_rx_work(queue);
e9d8b2c2 2074
f34a4cf9
ZK
2075 if (kthread_should_stop())
2076 break;
2077
e9d8b2c2
WL
2078 /* This frontend is found to be rogue, disable it in
2079 * kthread context. Currently this is only set when
 2080 * netback finds out the frontend sends a malformed packet,
2081 * but we cannot disable the interface in softirq
e9ce7cb6
WL
2082 * context so we defer it here, if this thread is
2083 * associated with queue 0.
e9d8b2c2 2084 */
f48da8b1
DV
2085 if (unlikely(vif->disabled && queue->id == 0)) {
2086 xenvif_carrier_off(vif);
42b5212f 2087 break;
09350788
ZK
2088 }
2089
e9ce7cb6
WL
2090 if (!skb_queue_empty(&queue->rx_queue))
2091 xenvif_rx_action(queue);
b3f980bd 2092
ecf08d2d
DV
2093 /* If the guest hasn't provided any Rx slots for a
2094 * while it's probably not responsive, drop the
2095 * carrier so packets are dropped earlier.
2096 */
26c0e102
DV
2097 if (vif->stall_timeout) {
2098 if (xenvif_rx_queue_stalled(queue))
2099 xenvif_queue_carrier_off(queue);
2100 else if (xenvif_rx_queue_ready(queue))
2101 xenvif_queue_carrier_on(queue);
2102 }
ecf08d2d 2103
f48da8b1
DV
2104 /* Queued packets may have foreign pages from other
2105 * domains. These cannot be queued indefinitely as
2106 * this would starve guests of grant refs and transmit
2107 * slots.
2108 */
2109 xenvif_rx_queue_drop_expired(queue);
2110
2111 xenvif_rx_queue_maybe_wake(queue);
2112
b3f980bd
WL
2113 cond_resched();
2114 }
2115
ca2f09f2 2116 /* Bin any remaining skbs */
f48da8b1 2117 xenvif_rx_queue_purge(queue);
ca2f09f2 2118
b3f980bd
WL
2119 return 0;
2120}
2121
a64bd934
WL
2122static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
2123{
2124 /* Dealloc thread must remain running until all inflight
2125 * packets complete.
2126 */
2127 return kthread_should_stop() &&
2128 !atomic_read(&queue->inflight_packets);
2129}
2130
f53c3fe8
ZK
2131int xenvif_dealloc_kthread(void *data)
2132{
e9ce7cb6 2133 struct xenvif_queue *queue = data;
f53c3fe8 2134
a64bd934 2135 for (;;) {
e9ce7cb6
WL
2136 wait_event_interruptible(queue->dealloc_wq,
2137 tx_dealloc_work_todo(queue) ||
a64bd934
WL
2138 xenvif_dealloc_kthread_should_stop(queue));
2139 if (xenvif_dealloc_kthread_should_stop(queue))
f53c3fe8
ZK
2140 break;
2141
e9ce7cb6 2142 xenvif_tx_dealloc_action(queue);
f53c3fe8
ZK
2143 cond_resched();
2144 }
2145
 2146 /* Unmap anything remaining */
e9ce7cb6
WL
2147 if (tx_dealloc_work_todo(queue))
2148 xenvif_tx_dealloc_action(queue);
f53c3fe8
ZK
2149
2150 return 0;
2151}
2152
f942dc25
IC
2153static int __init netback_init(void)
2154{
f942dc25 2155 int rc = 0;
f942dc25 2156
2a14b244 2157 if (!xen_domain())
f942dc25
IC
2158 return -ENODEV;
2159
4c82ac3c
WL
 2160 /* Allow as many queues as there are CPUs if the user has not
2161 * specified a value.
2162 */
2163 if (xenvif_max_queues == 0)
2164 xenvif_max_queues = num_online_cpus();
8d3d53b3 2165
37641494 2166 if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
383eda32
JP
2167 pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
2168 fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
37641494 2169 fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
2810e5b9
WL
2170 }
2171
f942dc25
IC
2172 rc = xenvif_xenbus_init();
2173 if (rc)
2174 goto failed_init;
2175
f51de243
ZK
2176#ifdef CONFIG_DEBUG_FS
2177 xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
2178 if (IS_ERR_OR_NULL(xen_netback_dbg_root))
2179 pr_warn("Init of debugfs returned %ld!\n",
2180 PTR_ERR(xen_netback_dbg_root));
2181#endif /* CONFIG_DEBUG_FS */
2182
f942dc25
IC
2183 return 0;
2184
2185failed_init:
f942dc25 2186 return rc;
f942dc25
IC
2187}
2188
2189module_init(netback_init);
2190
b103f358
WL
2191static void __exit netback_fini(void)
2192{
f51de243
ZK
2193#ifdef CONFIG_DEBUG_FS
2194 if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
2195 debugfs_remove_recursive(xen_netback_dbg_root);
2196#endif /* CONFIG_DEBUG_FS */
b103f358 2197 xenvif_xenbus_fini();
b103f358
WL
2198}
2199module_exit(netback_fini);
2200
f942dc25 2201MODULE_LICENSE("Dual BSD/GPL");
f984cec6 2202MODULE_ALIAS("xen-backend:vif");