xen-netback: coalesce slots in TX path and fix regressions
[linux-2.6-block.git] / drivers / net / xen-netback / netback.c
1/*
2 * Back-end of the driver for virtual network devices. This portion of the
3 * driver exports a 'unified' network-device interface that can be accessed
4 * by any operating system that implements a compatible front end. A
5 * reference front-end implementation can be found in:
6 * drivers/net/xen-netfront.c
7 *
8 * Copyright (c) 2002-2005, K A Fraser
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation; or, when distributed
13 * separately from the Linux kernel or incorporated into other
14 * software packages, subject to the following license:
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a copy
17 * of this source file (the "Software"), to deal in the Software without
18 * restriction, including without limitation the rights to use, copy, modify,
19 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20 * and to permit persons to whom the Software is furnished to do so, subject to
21 * the following conditions:
22 *
23 * The above copyright notice and this permission notice shall be included in
24 * all copies or substantial portions of the Software.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 * IN THE SOFTWARE.
33 */
34
35#include "common.h"
36
37#include <linux/kthread.h>
38#include <linux/if_vlan.h>
39#include <linux/udp.h>
40
41#include <net/tcp.h>
42
43#include <xen/xen.h>
44#include <xen/events.h>
45#include <xen/interface/memory.h>
46
47#include <asm/xen/hypercall.h>
48#include <asm/xen/page.h>
49
50/*
51 * This is the maximum number of slots a skb can have. If a guest sends a
52 * skb which exceeds this limit it is considered malicious.
53 */
54#define MAX_SKB_SLOTS_DEFAULT 20
55static unsigned int max_skb_slots = MAX_SKB_SLOTS_DEFAULT;
56module_param(max_skb_slots, uint, 0444);
57
58typedef unsigned int pending_ring_idx_t;
59#define INVALID_PENDING_RING_IDX (~0U)
60
61struct pending_tx_info {
62 struct xen_netif_tx_request req; /* coalesced tx request */
63 struct xenvif *vif;
64 pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
65 * if it is head of one or more tx
66 * reqs
67 */
68};
69
70struct netbk_rx_meta {
71 int id;
72 int size;
73 int gso_size;
74};
75
76#define MAX_PENDING_REQS 256
77
78/* Discriminate from any valid pending_idx value. */
79#define INVALID_PENDING_IDX 0xFFFF
80
81#define MAX_BUFFER_OFFSET PAGE_SIZE
82
83/* extra field used in struct page */
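/* Note on the encoding below: the owning netback group and the pending slot
 * index are packed into page->mapping so that get_page_ext() can recover,
 * for a foreign page, which struct xen_netbk instance and which pending slot
 * the page belongs to. The group is stored as group + 1 so that a NULL
 * mapping never decodes as a valid (group 0, idx 0) entry.
 */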
84union page_ext {
85 struct {
86#if BITS_PER_LONG < 64
87#define IDX_WIDTH 8
88#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
89 unsigned int group:GROUP_WIDTH;
90 unsigned int idx:IDX_WIDTH;
91#else
92 unsigned int group, idx;
93#endif
94 } e;
95 void *mapping;
96};
97
98struct xen_netbk {
99 wait_queue_head_t wq;
100 struct task_struct *task;
101
102 struct sk_buff_head rx_queue;
103 struct sk_buff_head tx_queue;
104
105 struct timer_list net_timer;
106
107 struct page *mmap_pages[MAX_PENDING_REQS];
108
109 pending_ring_idx_t pending_prod;
110 pending_ring_idx_t pending_cons;
111 struct list_head net_schedule_list;
112
113 /* Protect the net_schedule_list in netif. */
114 spinlock_t net_schedule_list_lock;
115
116 atomic_t netfront_count;
117
118 struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
119 /* Coalescing tx requests before copying makes the number of grant
120 * copy ops greater than or equal to the number of slots required. In
121 * the worst case a tx request straddling a destination page consumes 2 gnttab_copy.
122 */
123 struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
124
125 u16 pending_ring[MAX_PENDING_REQS];
126
127 /*
128 * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
129 * head/fragment page uses 2 copy operations because it
130 * straddles two buffers in the frontend.
131 */
132 struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
133 struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
134};
135
136static struct xen_netbk *xen_netbk;
137static int xen_netbk_group_nr;
138
139/*
140 * If head != INVALID_PENDING_RING_IDX, it means this tx request is the head
141 * of one or more merged tx requests; otherwise it is the continuation of
142 * a previous tx request.
143 */
144static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
145{
146 return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
147}
148
149void xen_netbk_add_xenvif(struct xenvif *vif)
150{
151 int i;
152 int min_netfront_count;
153 int min_group = 0;
154 struct xen_netbk *netbk;
155
156 min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
157 for (i = 0; i < xen_netbk_group_nr; i++) {
158 int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
159 if (netfront_count < min_netfront_count) {
160 min_group = i;
161 min_netfront_count = netfront_count;
162 }
163 }
164
165 netbk = &xen_netbk[min_group];
166
167 vif->netbk = netbk;
168 atomic_inc(&netbk->netfront_count);
169}
170
171void xen_netbk_remove_xenvif(struct xenvif *vif)
172{
173 struct xen_netbk *netbk = vif->netbk;
174 vif->netbk = NULL;
175 atomic_dec(&netbk->netfront_count);
176}
177
178static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
179 u8 status);
180static void make_tx_response(struct xenvif *vif,
181 struct xen_netif_tx_request *txp,
182 s8 st);
183static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
184 u16 id,
185 s8 st,
186 u16 offset,
187 u16 size,
188 u16 flags);
189
190static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
191 u16 idx)
192{
193 return page_to_pfn(netbk->mmap_pages[idx]);
194}
195
196static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
197 u16 idx)
198{
199 return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
200}
201
202/* extra field used in struct page */
203static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
204 unsigned int idx)
205{
206 unsigned int group = netbk - xen_netbk;
207 union page_ext ext = { .e = { .group = group + 1, .idx = idx } };
208
209 BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
210 pg->mapping = ext.mapping;
211}
212
213static int get_page_ext(struct page *pg,
214 unsigned int *pgroup, unsigned int *pidx)
215{
216 union page_ext ext = { .mapping = pg->mapping };
217 struct xen_netbk *netbk;
218 unsigned int group, idx;
219
220 group = ext.e.group - 1;
221
222 if (group < 0 || group >= xen_netbk_group_nr)
223 return 0;
224
225 netbk = &xen_netbk[group];
226
227 idx = ext.e.idx;
228
229 if ((idx < 0) || (idx >= MAX_PENDING_REQS))
230 return 0;
231
232 if (netbk->mmap_pages[idx] != pg)
233 return 0;
234
235 *pgroup = group;
236 *pidx = idx;
237
238 return 1;
239}
240
241/*
242 * This is the amount of packet we copy rather than map, so that the
243 * guest can't fiddle with the contents of the headers while we do
244 * packet processing on them (netfilter, routing, etc).
245 */
246#define PKT_PROT_LEN (ETH_HLEN + \
247 VLAN_HLEN + \
248 sizeof(struct iphdr) + MAX_IPOPTLEN + \
249 sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
250
251static u16 frag_get_pending_idx(skb_frag_t *frag)
252{
253 return (u16)frag->page_offset;
254}
255
256static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
257{
258 frag->page_offset = pending_idx;
259}
260
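/* The pending ring below is a power-of-two circular buffer: because
 * MAX_PENDING_REQS is 256, "i & (MAX_PENDING_REQS-1)" is equivalent to
 * "i % MAX_PENDING_REQS", so pending_prod and pending_cons may wrap freely.
 * For example, pending_index(257) == 1.
 */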
261static inline pending_ring_idx_t pending_index(unsigned i)
262{
263 return i & (MAX_PENDING_REQS-1);
264}
265
266static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
267{
268 return MAX_PENDING_REQS -
269 netbk->pending_prod + netbk->pending_cons;
270}
271
272static void xen_netbk_kick_thread(struct xen_netbk *netbk)
273{
274 wake_up(&netbk->wq);
275}
276
277static int max_required_rx_slots(struct xenvif *vif)
278{
279 int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
280
281 /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
282 if (vif->can_sg || vif->gso || vif->gso_prefix)
283 max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
284
285 return max;
286}
287
288int xen_netbk_rx_ring_full(struct xenvif *vif)
289{
290 RING_IDX peek = vif->rx_req_cons_peek;
291 RING_IDX needed = max_required_rx_slots(vif);
292
293 return ((vif->rx.sring->req_prod - peek) < needed) ||
294 ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
295}
296
297int xen_netbk_must_stop_queue(struct xenvif *vif)
298{
299 if (!xen_netbk_rx_ring_full(vif))
300 return 0;
301
302 vif->rx.sring->req_event = vif->rx_req_cons_peek +
303 max_required_rx_slots(vif);
304 mb(); /* request notification /then/ check the queue */
305
306 return xen_netbk_rx_ring_full(vif);
307}
308
309/*
310 * Returns true if we should start a new receive buffer instead of
311 * adding 'size' bytes to a buffer which currently contains 'offset'
312 * bytes.
313 */
314static bool start_new_rx_buffer(int offset, unsigned long size, int head)
315{
316 /* simple case: we have completely filled the current buffer. */
317 if (offset == MAX_BUFFER_OFFSET)
318 return true;
319
320 /*
321 * complex case: start a fresh buffer if the current frag
322 * would overflow the current buffer but only if:
323 * (i) this frag would fit completely in the next buffer
324 * and (ii) there is already some data in the current buffer
325 * and (iii) this is not the head buffer.
326 *
327 * Where:
328 * - (i) stops us splitting a frag into two copies
329 * unless the frag is too large for a single buffer.
330 * - (ii) stops us from leaving a buffer pointlessly empty.
331 * - (iii) stops us leaving the first buffer
332 * empty. Strictly speaking this is already covered
333 * by (ii) but is explicitly checked because
334 * netfront relies on the first buffer being
335 * non-empty and can crash otherwise.
336 *
337 * This means we will effectively linearise small
338 * frags but do not needlessly split large buffers
339 * into multiple copies; large frags tend to get their
340 * own buffers as before.
341 */
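 /* Illustrative numbers only (assuming MAX_BUFFER_OFFSET == 4096 and this
 * is not the head buffer): offset == 3000, size == 1500 starts a fresh
 * buffer because the frag fits one buffer in a single copy; offset == 3000,
 * size == 5000 keeps filling the current buffer, since that frag must be
 * split anyway.
 */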
342 if ((offset + size > MAX_BUFFER_OFFSET) &&
343 (size <= MAX_BUFFER_OFFSET) && offset && !head)
344 return true;
345
346 return false;
347}
348
349/*
350 * Figure out how many ring slots we're going to need to send @skb to
351 * the guest. This function is essentially a dry run of
352 * netbk_gop_frag_copy.
353 */
354unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
355{
356 unsigned int count;
357 int i, copy_off;
358
359 count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
360
361 copy_off = skb_headlen(skb) % PAGE_SIZE;
362
363 if (skb_shinfo(skb)->gso_size)
364 count++;
365
366 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
367 unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
368 unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
369 unsigned long bytes;
370
371 offset &= ~PAGE_MASK;
372
373 while (size > 0) {
374 BUG_ON(offset >= PAGE_SIZE);
375 BUG_ON(copy_off > MAX_BUFFER_OFFSET);
376
377 bytes = PAGE_SIZE - offset;
378
379 if (bytes > size)
380 bytes = size;
381
382 if (start_new_rx_buffer(copy_off, bytes, 0)) {
383 count++;
384 copy_off = 0;
385 }
386
387 if (copy_off + bytes > MAX_BUFFER_OFFSET)
388 bytes = MAX_BUFFER_OFFSET - copy_off;
389
390 copy_off += bytes;
391
392 offset += bytes;
393 size -= bytes;
394
395 if (offset == PAGE_SIZE)
396 offset = 0;
397 }
398 }
399 return count;
400}
401
402struct netrx_pending_operations {
403 unsigned copy_prod, copy_cons;
404 unsigned meta_prod, meta_cons;
405 struct gnttab_copy *copy;
406 struct netbk_rx_meta *meta;
407 int copy_off;
408 grant_ref_t copy_gref;
409};
410
411static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
412 struct netrx_pending_operations *npo)
413{
414 struct netbk_rx_meta *meta;
415 struct xen_netif_rx_request *req;
416
417 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
418
419 meta = npo->meta + npo->meta_prod++;
420 meta->gso_size = 0;
421 meta->size = 0;
422 meta->id = req->id;
423
424 npo->copy_off = 0;
425 npo->copy_gref = req->gref;
426
427 return meta;
428}
429
430/*
431 * Set up the grant operations for this fragment. If it's a flipping
432 * interface, we also set up the unmap request from here.
433 */
434static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
435 struct netrx_pending_operations *npo,
436 struct page *page, unsigned long size,
437 unsigned long offset, int *head)
438{
439 struct gnttab_copy *copy_gop;
440 struct netbk_rx_meta *meta;
441 /*
442 * These variables are used iff get_page_ext returns true,
443 * in which case they are guaranteed to be initialized.
444 */
445 unsigned int uninitialized_var(group), uninitialized_var(idx);
446 int foreign = get_page_ext(page, &group, &idx);
447 unsigned long bytes;
448
449 /* Data must not cross a page boundary. */
450 BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
451
452 meta = npo->meta + npo->meta_prod - 1;
453
454 /* Skip unused frames from start of page */
455 page += offset >> PAGE_SHIFT;
456 offset &= ~PAGE_MASK;
457
458 while (size > 0) {
459 BUG_ON(offset >= PAGE_SIZE);
460 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
461
462 bytes = PAGE_SIZE - offset;
463
464 if (bytes > size)
465 bytes = size;
466
467 if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
468 /*
469 * Netfront requires there to be some data in the head
470 * buffer.
471 */
472 BUG_ON(*head);
473
474 meta = get_next_rx_buffer(vif, npo);
475 }
476
477 if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
478 bytes = MAX_BUFFER_OFFSET - npo->copy_off;
479
480 copy_gop = npo->copy + npo->copy_prod++;
481 copy_gop->flags = GNTCOPY_dest_gref;
482 if (foreign) {
483 struct xen_netbk *netbk = &xen_netbk[group];
484 struct pending_tx_info *src_pend;
485
486 src_pend = &netbk->pending_tx_info[idx];
487
488 copy_gop->source.domid = src_pend->vif->domid;
489 copy_gop->source.u.ref = src_pend->req.gref;
490 copy_gop->flags |= GNTCOPY_source_gref;
491 } else {
492 void *vaddr = page_address(page);
493 copy_gop->source.domid = DOMID_SELF;
494 copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
495 }
496 copy_gop->source.offset = offset;
497 copy_gop->dest.domid = vif->domid;
498
499 copy_gop->dest.offset = npo->copy_off;
500 copy_gop->dest.u.ref = npo->copy_gref;
501 copy_gop->len = bytes;
502
503 npo->copy_off += bytes;
504 meta->size += bytes;
505
506 offset += bytes;
507 size -= bytes;
508
509 /* Next frame */
510 if (offset == PAGE_SIZE && size) {
511 BUG_ON(!PageCompound(page));
512 page++;
513 offset = 0;
514 }
515
516 /* Leave a gap for the GSO descriptor. */
517 if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
518 vif->rx.req_cons++;
519
520 *head = 0; /* There must be something in this buffer now. */
521
522 }
523}
524
525/*
526 * Prepare an SKB to be transmitted to the frontend.
527 *
528 * This function is responsible for allocating grant operations, meta
529 * structures, etc.
530 *
531 * It returns the number of meta structures consumed. The number of
532 * ring slots used is always equal to the number of meta slots used
533 * plus the number of GSO descriptors used. Currently, we use either
534 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
535 * frontend-side LRO).
536 */
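/* For example (illustrative): a non-GSO skb whose linear area and two frags
 * each land in their own buffer uses 3 meta slots and 3 ring slots; the same
 * skb with gso_size set (and gso_prefix off) still uses 3 meta slots but 4
 * ring slots, the extra slot carrying the XEN_NETIF_EXTRA_TYPE_GSO info.
 */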
537static int netbk_gop_skb(struct sk_buff *skb,
538 struct netrx_pending_operations *npo)
539{
540 struct xenvif *vif = netdev_priv(skb->dev);
541 int nr_frags = skb_shinfo(skb)->nr_frags;
542 int i;
543 struct xen_netif_rx_request *req;
544 struct netbk_rx_meta *meta;
545 unsigned char *data;
546 int head = 1;
547 int old_meta_prod;
548
549 old_meta_prod = npo->meta_prod;
550
551 /* Set up a GSO prefix descriptor, if necessary */
552 if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
553 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
554 meta = npo->meta + npo->meta_prod++;
555 meta->gso_size = skb_shinfo(skb)->gso_size;
556 meta->size = 0;
557 meta->id = req->id;
558 }
559
560 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
561 meta = npo->meta + npo->meta_prod++;
562
563 if (!vif->gso_prefix)
564 meta->gso_size = skb_shinfo(skb)->gso_size;
565 else
566 meta->gso_size = 0;
567
568 meta->size = 0;
569 meta->id = req->id;
570 npo->copy_off = 0;
571 npo->copy_gref = req->gref;
572
573 data = skb->data;
574 while (data < skb_tail_pointer(skb)) {
575 unsigned int offset = offset_in_page(data);
576 unsigned int len = PAGE_SIZE - offset;
577
578 if (data + len > skb_tail_pointer(skb))
579 len = skb_tail_pointer(skb) - data;
580
581 netbk_gop_frag_copy(vif, skb, npo,
582 virt_to_page(data), len, offset, &head);
583 data += len;
584 }
585
586 for (i = 0; i < nr_frags; i++) {
587 netbk_gop_frag_copy(vif, skb, npo,
588 skb_frag_page(&skb_shinfo(skb)->frags[i]),
589 skb_frag_size(&skb_shinfo(skb)->frags[i]),
590 skb_shinfo(skb)->frags[i].page_offset,
591 &head);
592 }
593
594 return npo->meta_prod - old_meta_prod;
595}
596
597/*
598 * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
599 * used to set up the operations on the top of
600 * netrx_pending_operations, which have since been done. Check that
601 * they didn't give any errors and advance over them.
602 */
603static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
604 struct netrx_pending_operations *npo)
605{
606 struct gnttab_copy *copy_op;
607 int status = XEN_NETIF_RSP_OKAY;
608 int i;
609
610 for (i = 0; i < nr_meta_slots; i++) {
611 copy_op = npo->copy + npo->copy_cons++;
612 if (copy_op->status != GNTST_okay) {
613 netdev_dbg(vif->dev,
614 "Bad status %d from copy to DOM%d.\n",
615 copy_op->status, vif->domid);
616 status = XEN_NETIF_RSP_ERROR;
617 }
618 }
619
620 return status;
621}
622
623static void netbk_add_frag_responses(struct xenvif *vif, int status,
624 struct netbk_rx_meta *meta,
625 int nr_meta_slots)
626{
627 int i;
628 unsigned long offset;
629
630 /* No fragments used */
631 if (nr_meta_slots <= 1)
632 return;
633
634 nr_meta_slots--;
635
636 for (i = 0; i < nr_meta_slots; i++) {
637 int flags;
638 if (i == nr_meta_slots - 1)
639 flags = 0;
640 else
641 flags = XEN_NETRXF_more_data;
642
643 offset = 0;
644 make_rx_response(vif, meta[i].id, status, offset,
645 meta[i].size, flags);
646 }
647}
648
649struct skb_cb_overlay {
650 int meta_slots_used;
651};
652
653static void xen_netbk_rx_action(struct xen_netbk *netbk)
654{
655 struct xenvif *vif = NULL, *tmp;
656 s8 status;
657 u16 irq, flags;
658 struct xen_netif_rx_response *resp;
659 struct sk_buff_head rxq;
660 struct sk_buff *skb;
661 LIST_HEAD(notify);
662 int ret;
663 int nr_frags;
664 int count;
665 unsigned long offset;
666 struct skb_cb_overlay *sco;
667
668 struct netrx_pending_operations npo = {
669 .copy = netbk->grant_copy_op,
670 .meta = netbk->meta,
671 };
672
673 skb_queue_head_init(&rxq);
674
675 count = 0;
676
677 while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
678 vif = netdev_priv(skb->dev);
679 nr_frags = skb_shinfo(skb)->nr_frags;
680
681 sco = (struct skb_cb_overlay *)skb->cb;
682 sco->meta_slots_used = netbk_gop_skb(skb, &npo);
683
684 count += nr_frags + 1;
685
686 __skb_queue_tail(&rxq, skb);
687
688 /* Filled the batch queue? */
689 /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
690 if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
691 break;
692 }
693
694 BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
695
696 if (!npo.copy_prod)
697 return;
698
699 BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
700 gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);
701
702 while ((skb = __skb_dequeue(&rxq)) != NULL) {
703 sco = (struct skb_cb_overlay *)skb->cb;
704
705 vif = netdev_priv(skb->dev);
706
707 if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
708 resp = RING_GET_RESPONSE(&vif->rx,
709 vif->rx.rsp_prod_pvt++);
710
711 resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
712
713 resp->offset = netbk->meta[npo.meta_cons].gso_size;
714 resp->id = netbk->meta[npo.meta_cons].id;
715 resp->status = sco->meta_slots_used;
716
717 npo.meta_cons++;
718 sco->meta_slots_used--;
719 }
720
721
722 vif->dev->stats.tx_bytes += skb->len;
723 vif->dev->stats.tx_packets++;
724
725 status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
726
727 if (sco->meta_slots_used == 1)
728 flags = 0;
729 else
730 flags = XEN_NETRXF_more_data;
731
732 if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
733 flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
734 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
735 /* remote but checksummed. */
736 flags |= XEN_NETRXF_data_validated;
737
738 offset = 0;
739 resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
740 status, offset,
741 netbk->meta[npo.meta_cons].size,
742 flags);
743
744 if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
745 struct xen_netif_extra_info *gso =
746 (struct xen_netif_extra_info *)
747 RING_GET_RESPONSE(&vif->rx,
748 vif->rx.rsp_prod_pvt++);
749
750 resp->flags |= XEN_NETRXF_extra_info;
751
752 gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
753 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
754 gso->u.gso.pad = 0;
755 gso->u.gso.features = 0;
756
757 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
758 gso->flags = 0;
759 }
760
761 netbk_add_frag_responses(vif, status,
762 netbk->meta + npo.meta_cons + 1,
763 sco->meta_slots_used);
764
765 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
766 irq = vif->irq;
767 if (ret && list_empty(&vif->notify_list))
768 list_add_tail(&vif->notify_list, &notify);
769
770 xenvif_notify_tx_completion(vif);
771
772 xenvif_put(vif);
773 npo.meta_cons += sco->meta_slots_used;
774 dev_kfree_skb(skb);
775 }
776
777 list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
778 notify_remote_via_irq(vif->irq);
779 list_del_init(&vif->notify_list);
780 }
781
782 /* More work to do? */
783 if (!skb_queue_empty(&netbk->rx_queue) &&
784 !timer_pending(&netbk->net_timer))
785 xen_netbk_kick_thread(netbk);
786}
787
788void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
789{
790 struct xen_netbk *netbk = vif->netbk;
791
792 skb_queue_tail(&netbk->rx_queue, skb);
793
794 xen_netbk_kick_thread(netbk);
795}
796
797static void xen_netbk_alarm(unsigned long data)
798{
799 struct xen_netbk *netbk = (struct xen_netbk *)data;
800 xen_netbk_kick_thread(netbk);
801}
802
803static int __on_net_schedule_list(struct xenvif *vif)
804{
805 return !list_empty(&vif->schedule_list);
806}
807
808/* Must be called with net_schedule_list_lock held */
809static void remove_from_net_schedule_list(struct xenvif *vif)
810{
811 if (likely(__on_net_schedule_list(vif))) {
812 list_del_init(&vif->schedule_list);
813 xenvif_put(vif);
814 }
815}
816
817static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
818{
819 struct xenvif *vif = NULL;
820
821 spin_lock_irq(&netbk->net_schedule_list_lock);
822 if (list_empty(&netbk->net_schedule_list))
823 goto out;
824
825 vif = list_first_entry(&netbk->net_schedule_list,
826 struct xenvif, schedule_list);
827 if (!vif)
828 goto out;
829
830 xenvif_get(vif);
831
832 remove_from_net_schedule_list(vif);
833out:
834 spin_unlock_irq(&netbk->net_schedule_list_lock);
835 return vif;
836}
837
838void xen_netbk_schedule_xenvif(struct xenvif *vif)
839{
840 unsigned long flags;
841 struct xen_netbk *netbk = vif->netbk;
842
843 if (__on_net_schedule_list(vif))
844 goto kick;
845
846 spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
847 if (!__on_net_schedule_list(vif) &&
848 likely(xenvif_schedulable(vif))) {
849 list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
850 xenvif_get(vif);
851 }
852 spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);
853
854kick:
855 smp_mb();
856 if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
857 !list_empty(&netbk->net_schedule_list))
858 xen_netbk_kick_thread(netbk);
859}
860
861void xen_netbk_deschedule_xenvif(struct xenvif *vif)
862{
863 struct xen_netbk *netbk = vif->netbk;
864 spin_lock_irq(&netbk->net_schedule_list_lock);
865 remove_from_net_schedule_list(vif);
866 spin_unlock_irq(&netbk->net_schedule_list_lock);
867}
868
869void xen_netbk_check_rx_xenvif(struct xenvif *vif)
870{
871 int more_to_do;
872
873 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
874
875 if (more_to_do)
876 xen_netbk_schedule_xenvif(vif);
877}
878
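/* Credit-based TX scheduling: roughly speaking, a vif may consume up to
 * credit_bytes of guest transmit data per credit_usec window. tx_add_credit()
 * tops the allowance back up (capped by the burst size computed below), and
 * tx_credit_exceeded() arms credit_timeout when a request has to wait.
 */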
879static void tx_add_credit(struct xenvif *vif)
880{
881 unsigned long max_burst, max_credit;
882
883 /*
884 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
885 * Otherwise the interface can seize up due to insufficient credit.
886 */
887 max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
888 max_burst = min(max_burst, 131072UL);
889 max_burst = max(max_burst, vif->credit_bytes);
890
891 /* Take care that adding a new chunk of credit doesn't wrap to zero. */
892 max_credit = vif->remaining_credit + vif->credit_bytes;
893 if (max_credit < vif->remaining_credit)
894 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
895
896 vif->remaining_credit = min(max_credit, max_burst);
897}
898
899static void tx_credit_callback(unsigned long data)
900{
901 struct xenvif *vif = (struct xenvif *)data;
902 tx_add_credit(vif);
903 xen_netbk_check_rx_xenvif(vif);
904}
905
906static void netbk_tx_err(struct xenvif *vif,
907 struct xen_netif_tx_request *txp, RING_IDX end)
908{
909 RING_IDX cons = vif->tx.req_cons;
910
911 do {
912 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
913 if (cons == end)
914 break;
915 txp = RING_GET_REQUEST(&vif->tx, cons++);
916 } while (1);
917 vif->tx.req_cons = cons;
918 xen_netbk_check_rx_xenvif(vif);
919 xenvif_put(vif);
920}
921
922static void netbk_fatal_tx_err(struct xenvif *vif)
923{
924 netdev_err(vif->dev, "fatal error; disabling device\n");
925 xenvif_carrier_off(vif);
926 xenvif_put(vif);
927}
928
929static int netbk_count_requests(struct xenvif *vif,
930 struct xen_netif_tx_request *first,
931 RING_IDX first_idx,
932 struct xen_netif_tx_request *txp,
933 int work_to_do)
934{
935 RING_IDX cons = vif->tx.req_cons;
936 int slots = 0;
937 int drop_err = 0;
938
939 if (!(first->flags & XEN_NETTXF_more_data))
940 return 0;
941
942 do {
943 if (slots >= work_to_do) {
944 netdev_err(vif->dev,
945 "Asked for %d slots but exceeds this limit\n",
946 work_to_do);
947 netbk_fatal_tx_err(vif);
948 return -ENODATA;
949 }
950
951 /* This guest is really using too many slots and is
952 * considered malicious.
953 */
954 if (unlikely(slots >= max_skb_slots)) {
955 netdev_err(vif->dev,
956 "Malicious frontend using %d slots, threshold %u\n",
957 slots, max_skb_slots);
958 netbk_fatal_tx_err(vif);
959 return -E2BIG;
960 }
961
962 /* The Xen network protocol has an implicit dependency on
963 * MAX_SKB_FRAGS. XEN_NETIF_NR_SLOTS_MIN is set to the
964 * historical MAX_SKB_FRAGS value 18 to honor the same
965 * behavior as before. Any packet using more than 18
966 * slots but less than max_skb_slots slots is dropped.
967 */
968 if (!drop_err && slots >= XEN_NETIF_NR_SLOTS_MIN) {
969 if (net_ratelimit())
970 netdev_dbg(vif->dev,
971 "Too many slots (%d) exceeding limit (%d), dropping packet\n",
972 slots, XEN_NETIF_NR_SLOTS_MIN);
973 drop_err = -E2BIG;
974 }
975
976 memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
977 sizeof(*txp));
978 if (txp->size > first->size) {
979 netdev_err(vif->dev,
980 "Invalid tx request, slot size %u > remaining size %u\n",
981 txp->size, first->size);
982 netbk_fatal_tx_err(vif);
983 return -EIO;
984 }
985
986 first->size -= txp->size;
987 slots++;
988
989 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
990 netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
991 txp->offset, txp->size);
992 netbk_fatal_tx_err(vif);
993 return -EINVAL;
994 }
995 } while ((txp++)->flags & XEN_NETTXF_more_data);
996
997 if (drop_err) {
998 netbk_tx_err(vif, first, first_idx + slots);
999 return drop_err;
1000 }
1001
1002 return slots;
1003}
1004
1005static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
1006 u16 pending_idx)
1007{
1008 struct page *page;
1009 page = alloc_page(GFP_KERNEL|__GFP_COLD);
1010 if (!page)
1011 return NULL;
1012 set_page_ext(page, netbk, pending_idx);
1013 netbk->mmap_pages[pending_idx] = page;
1014 return page;
1015}
1016
1017static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
1018 struct xenvif *vif,
1019 struct sk_buff *skb,
1020 struct xen_netif_tx_request *txp,
1021 struct gnttab_copy *gop)
1022{
1023 struct skb_shared_info *shinfo = skb_shinfo(skb);
1024 skb_frag_t *frags = shinfo->frags;
1025 u16 pending_idx = *((u16 *)skb->data);
1026 u16 head_idx = 0;
1027 int slot, start;
1028 struct page *page;
1029 pending_ring_idx_t index, start_idx = 0;
1030 uint16_t dst_offset;
1031 unsigned int nr_slots;
1032 struct pending_tx_info *first = NULL;
1033
1034 /* At this point shinfo->nr_frags is in fact the number of
1035 * slots, which can be as large as XEN_NETIF_NR_SLOTS_MIN.
1036 */
1037 nr_slots = shinfo->nr_frags;
1038
1039 /* Skip first skb fragment if it is on same page as header fragment. */
1040 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
1041
1042 /* Coalesce tx requests; at this point the packet passed in
1043 * should be <= 64K. Any packets larger than 64K have been
1044 * handled in netbk_count_requests().
1045 */
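 /* For example (illustrative): three 1000-byte slots can be coalesced into
 * a single freshly allocated page backing one skb frag; only the first slot
 * becomes the head entry (head == start_idx), the remaining slots keep
 * INVALID_PENDING_RING_IDX and are released together with the head in
 * xen_netbk_idx_release().
 */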
1046 for (shinfo->nr_frags = slot = start; slot < nr_slots;
1047 shinfo->nr_frags++) {
f942dc25
IC
1048 struct pending_tx_info *pending_tx_info =
1049 netbk->pending_tx_info;
1050
1051 page = alloc_page(GFP_KERNEL|__GFP_COLD);
1052 if (!page)
1053 goto err;
1054
1055 dst_offset = 0;
1056 first = NULL;
1057 while (dst_offset < PAGE_SIZE && slot < nr_slots) {
1058 gop->flags = GNTCOPY_source_gref;
1059
1060 gop->source.u.ref = txp->gref;
1061 gop->source.domid = vif->domid;
1062 gop->source.offset = txp->offset;
1063
1064 gop->dest.domid = DOMID_SELF;
1065
1066 gop->dest.offset = dst_offset;
1067 gop->dest.u.gmfn = virt_to_mfn(page_address(page));
1068
1069 if (dst_offset + txp->size > PAGE_SIZE) {
1070 /* This page can only merge a portion
1071 * of tx request. Do not increment any
1072 * pointer / counter here. The txp
1073 * will be dealt with in future
1074 * rounds, eventually hitting the
1075 * `else` branch.
1076 */
1077 gop->len = PAGE_SIZE - dst_offset;
1078 txp->offset += gop->len;
1079 txp->size -= gop->len;
1080 dst_offset += gop->len; /* quit loop */
1081 } else {
1082 /* This tx request can be merged in the page */
1083 gop->len = txp->size;
1084 dst_offset += gop->len;
1085
1086 index = pending_index(netbk->pending_cons++);
1087
1088 pending_idx = netbk->pending_ring[index];
1089
1090 memcpy(&pending_tx_info[pending_idx].req, txp,
1091 sizeof(*txp));
1092 xenvif_get(vif);
1093
1094 pending_tx_info[pending_idx].vif = vif;
1095
1096 /* Poison these fields, corresponding
1097 * fields for head tx req will be set
1098 * to correct values after the loop.
1099 */
1100 netbk->mmap_pages[pending_idx] = (void *)(~0UL);
1101 pending_tx_info[pending_idx].head =
1102 INVALID_PENDING_RING_IDX;
1103
1104 if (!first) {
1105 first = &pending_tx_info[pending_idx];
1106 start_idx = index;
1107 head_idx = pending_idx;
1108 }
1109
1110 txp++;
1111 slot++;
1112 }
1113
1114 gop++;
1115 }
1116
1117 first->req.offset = 0;
1118 first->req.size = dst_offset;
1119 first->head = start_idx;
1120 set_page_ext(page, netbk, head_idx);
1121 netbk->mmap_pages[head_idx] = page;
1122 frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
1123 }
1124
1125 BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
1126
1127 return gop;
1128err:
1129 /* Unwind, freeing all pages and sending error responses. */
1130 while (shinfo->nr_frags-- > start) {
1131 xen_netbk_idx_release(netbk,
1132 frag_get_pending_idx(&frags[shinfo->nr_frags]),
1133 XEN_NETIF_RSP_ERROR);
1134 }
1135 /* The head too, if necessary. */
1136 if (start)
1137 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
1138
1139 return NULL;
1140}
1141
1142static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1143 struct sk_buff *skb,
1144 struct gnttab_copy **gopp)
1145{
1146 struct gnttab_copy *gop = *gopp;
1147 u16 pending_idx = *((u16 *)skb->data);
1148 struct skb_shared_info *shinfo = skb_shinfo(skb);
1149 struct pending_tx_info *tx_info;
1150 int nr_frags = shinfo->nr_frags;
1151 int i, err, start;
1152 u16 peek; /* peek into next tx request */
1153
1154 /* Check status of header. */
1155 err = gop->status;
1156 if (unlikely(err))
1157 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
1158
1159 /* Skip first skb fragment if it is on same page as header fragment. */
1160 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
1161
1162 for (i = start; i < nr_frags; i++) {
1163 int j, newerr;
1164 pending_ring_idx_t head;
1165
1166 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
1167 tx_info = &netbk->pending_tx_info[pending_idx];
1168 head = tx_info->head;
1169
1170 /* Check error status: if okay then remember grant handle. */
1171 do {
1172 newerr = (++gop)->status;
1173 if (newerr)
1174 break;
1175 peek = netbk->pending_ring[pending_index(++head)];
1176 } while (!pending_tx_is_head(netbk, peek));
1177
f942dc25
IC
1178 if (likely(!newerr)) {
1179 /* Had a previous error? Invalidate this fragment. */
1180 if (unlikely(err))
1181 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1182 continue;
1183 }
1184
1185 /* Error on this fragment: respond to client with an error. */
1186 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
1187
1188 /* Not the first error? Preceding frags already invalidated. */
1189 if (err)
1190 continue;
1191
1192 /* First error: invalidate header and preceding fragments. */
1193 pending_idx = *((u16 *)skb->data);
1194 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1195 for (j = start; j < i; j++) {
1196 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1197 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1198 }
1199
1200 /* Remember the error: invalidate all subsequent fragments. */
1201 err = newerr;
1202 }
1203
1204 *gopp = gop + 1;
1205 return err;
1206}
1207
1208static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
1209{
1210 struct skb_shared_info *shinfo = skb_shinfo(skb);
1211 int nr_frags = shinfo->nr_frags;
1212 int i;
1213
1214 for (i = 0; i < nr_frags; i++) {
1215 skb_frag_t *frag = shinfo->frags + i;
1216 struct xen_netif_tx_request *txp;
1217 struct page *page;
1218 u16 pending_idx;
1219
1220 pending_idx = frag_get_pending_idx(frag);
1221
1222 txp = &netbk->pending_tx_info[pending_idx].req;
1223 page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
1224 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
1225 skb->len += txp->size;
1226 skb->data_len += txp->size;
1227 skb->truesize += txp->size;
1228
1229 /* Take an extra reference to offset xen_netbk_idx_release */
1230 get_page(netbk->mmap_pages[pending_idx]);
1231 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1232 }
1233}
1234
1235static int xen_netbk_get_extras(struct xenvif *vif,
1236 struct xen_netif_extra_info *extras,
1237 int work_to_do)
1238{
1239 struct xen_netif_extra_info extra;
1240 RING_IDX cons = vif->tx.req_cons;
1241
1242 do {
1243 if (unlikely(work_to_do-- <= 0)) {
1244 netdev_err(vif->dev, "Missing extra info\n");
1245 netbk_fatal_tx_err(vif);
1246 return -EBADR;
1247 }
1248
1249 memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
1250 sizeof(extra));
1251 if (unlikely(!extra.type ||
1252 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1253 vif->tx.req_cons = ++cons;
1254 netdev_err(vif->dev,
1255 "Invalid extra type: %d\n", extra.type);
1256 netbk_fatal_tx_err(vif);
1257 return -EINVAL;
1258 }
1259
1260 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1261 vif->tx.req_cons = ++cons;
1262 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1263
1264 return work_to_do;
1265}
1266
1267static int netbk_set_skb_gso(struct xenvif *vif,
1268 struct sk_buff *skb,
1269 struct xen_netif_extra_info *gso)
1270{
1271 if (!gso->u.gso.size) {
1272 netdev_err(vif->dev, "GSO size must not be zero.\n");
1273 netbk_fatal_tx_err(vif);
f942dc25
IC
1274 return -EINVAL;
1275 }
1276
1277 /* Currently only TCPv4 S.O. is supported. */
1278 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
1279 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1280 netbk_fatal_tx_err(vif);
1281 return -EINVAL;
1282 }
1283
1284 skb_shinfo(skb)->gso_size = gso->u.gso.size;
1285 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1286
1287 /* Header must be checked, and gso_segs computed. */
1288 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1289 skb_shinfo(skb)->gso_segs = 0;
1290
1291 return 0;
1292}
1293
1294static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1295{
1296 struct iphdr *iph;
1297 int err = -EPROTO;
1298 int recalculate_partial_csum = 0;
1299
1300 /*
1301 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1302 * peers can fail to set NETRXF_csum_blank when sending a GSO
1303 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1304 * recalculate the partial checksum.
1305 */
1306 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1307 vif->rx_gso_checksum_fixup++;
1308 skb->ip_summed = CHECKSUM_PARTIAL;
1309 recalculate_partial_csum = 1;
1310 }
1311
1312 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1313 if (skb->ip_summed != CHECKSUM_PARTIAL)
1314 return 0;
1315
1316 if (skb->protocol != htons(ETH_P_IP))
1317 goto out;
1318
1319 iph = (void *)skb->data;
1320 switch (iph->protocol) {
1321 case IPPROTO_TCP:
1322 if (!skb_partial_csum_set(skb, 4 * iph->ihl,
1323 offsetof(struct tcphdr, check)))
1324 goto out;
1325
1326 if (recalculate_partial_csum) {
1327 struct tcphdr *tcph = tcp_hdr(skb);
1328 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1329 skb->len - iph->ihl*4,
1330 IPPROTO_TCP, 0);
1331 }
1332 break;
1333 case IPPROTO_UDP:
1334 if (!skb_partial_csum_set(skb, 4 * iph->ihl,
1335 offsetof(struct udphdr, check)))
1336 goto out;
1337
1338 if (recalculate_partial_csum) {
1339 struct udphdr *udph = udp_hdr(skb);
1340 udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1341 skb->len - iph->ihl*4,
1342 IPPROTO_UDP, 0);
1343 }
1344 break;
1345 default:
1346 if (net_ratelimit())
1347 netdev_err(vif->dev,
1348 "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
1349 iph->protocol);
1350 goto out;
1351 }
1352
1353 err = 0;
1354
1355out:
1356 return err;
1357}
1358
1359static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1360{
1361 unsigned long now = jiffies;
1362 unsigned long next_credit =
1363 vif->credit_timeout.expires +
1364 msecs_to_jiffies(vif->credit_usec / 1000);
1365
1366 /* Timer could already be pending in rare cases. */
1367 if (timer_pending(&vif->credit_timeout))
1368 return true;
1369
1370 /* Passed the point where we can replenish credit? */
1371 if (time_after_eq(now, next_credit)) {
1372 vif->credit_timeout.expires = now;
1373 tx_add_credit(vif);
1374 }
1375
1376 /* Still too big to send right now? Set a callback. */
1377 if (size > vif->remaining_credit) {
1378 vif->credit_timeout.data =
1379 (unsigned long)vif;
1380 vif->credit_timeout.function =
1381 tx_credit_callback;
1382 mod_timer(&vif->credit_timeout,
1383 next_credit);
1384
1385 return true;
1386 }
1387
1388 return false;
1389}
1390
1391static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1392{
1393 struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
1394 struct sk_buff *skb;
1395 int ret;
1396
1397 while ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
1398 < MAX_PENDING_REQS) &&
1399 !list_empty(&netbk->net_schedule_list)) {
1400 struct xenvif *vif;
1401 struct xen_netif_tx_request txreq;
1402 struct xen_netif_tx_request txfrags[max_skb_slots];
1403 struct page *page;
1404 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1405 u16 pending_idx;
1406 RING_IDX idx;
1407 int work_to_do;
1408 unsigned int data_len;
1409 pending_ring_idx_t index;
1410
1411 /* Get a netif from the list with work to do. */
1412 vif = poll_net_schedule_list(netbk);
1413 /* This can sometimes happen because the test of
1414 * list_empty(net_schedule_list) at the top of the
1415 * loop is unlocked. Just go back and have another
1416 * look.
1417 */
f942dc25
IC
1418 if (!vif)
1419 continue;
1420
1421 if (vif->tx.sring->req_prod - vif->tx.req_cons >
1422 XEN_NETIF_TX_RING_SIZE) {
1423 netdev_err(vif->dev,
1424 "Impossible number of requests. "
1425 "req_prod %d, req_cons %d, size %ld\n",
1426 vif->tx.sring->req_prod, vif->tx.req_cons,
1427 XEN_NETIF_TX_RING_SIZE);
1428 netbk_fatal_tx_err(vif);
1429 continue;
1430 }
1431
1432 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
1433 if (!work_to_do) {
1434 xenvif_put(vif);
1435 continue;
1436 }
1437
1438 idx = vif->tx.req_cons;
1439 rmb(); /* Ensure that we see the request before we copy it. */
1440 memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
1441
1442 /* Credit-based scheduling. */
1443 if (txreq.size > vif->remaining_credit &&
1444 tx_credit_exceeded(vif, txreq.size)) {
1445 xenvif_put(vif);
1446 continue;
1447 }
1448
1449 vif->remaining_credit -= txreq.size;
1450
1451 work_to_do--;
1452 vif->tx.req_cons = ++idx;
1453
1454 memset(extras, 0, sizeof(extras));
1455 if (txreq.flags & XEN_NETTXF_extra_info) {
1456 work_to_do = xen_netbk_get_extras(vif, extras,
1457 work_to_do);
1458 idx = vif->tx.req_cons;
1459 if (unlikely(work_to_do < 0))
1460 continue;
1461 }
1462
1463 ret = netbk_count_requests(vif, &txreq, idx,
1464 txfrags, work_to_do);
1465 if (unlikely(ret < 0))
1466 continue;
1467
1468 idx += ret;
1469
1470 if (unlikely(txreq.size < ETH_HLEN)) {
1471 netdev_dbg(vif->dev,
1472 "Bad packet size: %d\n", txreq.size);
1473 netbk_tx_err(vif, &txreq, idx);
1474 continue;
1475 }
1476
1477 /* No crossing a page as the payload mustn't fragment. */
1478 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
1479 netdev_err(vif->dev,
1480 "txreq.offset: %x, size: %u, end: %lu\n",
1481 txreq.offset, txreq.size,
1482 (txreq.offset&~PAGE_MASK) + txreq.size);
1483 netbk_fatal_tx_err(vif);
1484 continue;
1485 }
1486
1487 index = pending_index(netbk->pending_cons);
1488 pending_idx = netbk->pending_ring[index];
1489
1490 data_len = (txreq.size > PKT_PROT_LEN &&
1491 ret < XEN_NETIF_NR_SLOTS_MIN) ?
1492 PKT_PROT_LEN : txreq.size;
1493
1494 skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
1495 GFP_ATOMIC | __GFP_NOWARN);
1496 if (unlikely(skb == NULL)) {
1497 netdev_dbg(vif->dev,
1498 "Can't allocate a skb in start_xmit.\n");
1499 netbk_tx_err(vif, &txreq, idx);
1500 break;
1501 }
1502
1503 /* Packets passed to netif_rx() must have some headroom. */
1504 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1505
1506 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1507 struct xen_netif_extra_info *gso;
1508 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1509
1510 if (netbk_set_skb_gso(vif, skb, gso)) {
1511 /* Failure in netbk_set_skb_gso is fatal. */
1512 kfree_skb(skb);
1513 continue;
1514 }
1515 }
1516
1517 /* XXX could copy straight to head */
1518 page = xen_netbk_alloc_page(netbk, pending_idx);
1519 if (!page) {
1520 kfree_skb(skb);
1521 netbk_tx_err(vif, &txreq, idx);
1522 continue;
1523 }
1524
1525 gop->source.u.ref = txreq.gref;
1526 gop->source.domid = vif->domid;
1527 gop->source.offset = txreq.offset;
1528
1529 gop->dest.u.gmfn = virt_to_mfn(page_address(page));
1530 gop->dest.domid = DOMID_SELF;
1531 gop->dest.offset = txreq.offset;
1532
1533 gop->len = txreq.size;
1534 gop->flags = GNTCOPY_source_gref;
1535
1536 gop++;
1537
1538 memcpy(&netbk->pending_tx_info[pending_idx].req,
1539 &txreq, sizeof(txreq));
1540 netbk->pending_tx_info[pending_idx].vif = vif;
1541 netbk->pending_tx_info[pending_idx].head = index;
1542 *((u16 *)skb->data) = pending_idx;
1543
1544 __skb_put(skb, data_len);
1545
1546 skb_shinfo(skb)->nr_frags = ret;
1547 if (data_len < txreq.size) {
1548 skb_shinfo(skb)->nr_frags++;
1549 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1550 pending_idx);
1551 } else {
1552 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1553 INVALID_PENDING_IDX);
1554 }
1555
1556 netbk->pending_cons++;
1557
1558 request_gop = xen_netbk_get_requests(netbk, vif,
1559 skb, txfrags, gop);
1560 if (request_gop == NULL) {
1561 kfree_skb(skb);
1562 netbk_tx_err(vif, &txreq, idx);
1563 continue;
1564 }
1565 gop = request_gop;
1566
1567 __skb_queue_tail(&netbk->tx_queue, skb);
1568
1569 vif->tx.req_cons = idx;
1570 xen_netbk_check_rx_xenvif(vif);
1571
1572 if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
1573 break;
1574 }
1575
1576 return gop - netbk->tx_copy_ops;
1577}
1578
1579static void xen_netbk_tx_submit(struct xen_netbk *netbk)
1580{
1581 struct gnttab_copy *gop = netbk->tx_copy_ops;
1582 struct sk_buff *skb;
1583
1584 while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
1585 struct xen_netif_tx_request *txp;
1586 struct xenvif *vif;
1587 u16 pending_idx;
1588 unsigned data_len;
1589
1590 pending_idx = *((u16 *)skb->data);
1591 vif = netbk->pending_tx_info[pending_idx].vif;
1592 txp = &netbk->pending_tx_info[pending_idx].req;
1593
1594 /* Check the remap error code. */
1595 if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
1596 netdev_dbg(vif->dev, "netback grant failed.\n");
1597 skb_shinfo(skb)->nr_frags = 0;
1598 kfree_skb(skb);
1599 continue;
1600 }
1601
1602 data_len = skb->len;
1603 memcpy(skb->data,
1604 (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
1605 data_len);
1606 if (data_len < txp->size) {
1607 /* Append the packet payload as a fragment. */
1608 txp->offset += data_len;
1609 txp->size -= data_len;
1610 } else {
1611 /* Schedule a response immediately. */
1612 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1613 }
1614
1615 if (txp->flags & XEN_NETTXF_csum_blank)
1616 skb->ip_summed = CHECKSUM_PARTIAL;
1617 else if (txp->flags & XEN_NETTXF_data_validated)
1618 skb->ip_summed = CHECKSUM_UNNECESSARY;
1619
1620 xen_netbk_fill_frags(netbk, skb);
1621
1622 /*
1623 * If the initial fragment was < PKT_PROT_LEN then
1624 * pull through some bytes from the other fragments to
1625 * increase the linear region to PKT_PROT_LEN bytes.
1626 */
1627 if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
1628 int target = min_t(int, skb->len, PKT_PROT_LEN);
1629 __pskb_pull_tail(skb, target - skb_headlen(skb));
1630 }
1631
1632 skb->dev = vif->dev;
1633 skb->protocol = eth_type_trans(skb, skb->dev);
1634 skb_reset_network_header(skb);
1635
1636 if (checksum_setup(vif, skb)) {
1637 netdev_dbg(vif->dev,
1638 "Can't setup checksum in net_tx_action\n");
1639 kfree_skb(skb);
1640 continue;
1641 }
1642
1643 skb_probe_transport_header(skb, 0);
1644
1645 vif->dev->stats.rx_bytes += skb->len;
1646 vif->dev->stats.rx_packets++;
1647
1648 xenvif_receive_skb(vif, skb);
1649 }
1650}
1651
1652/* Called after netfront has transmitted */
1653static void xen_netbk_tx_action(struct xen_netbk *netbk)
1654{
1655 unsigned nr_gops;
1656
1657 nr_gops = xen_netbk_tx_build_gops(netbk);
1658
1659 if (nr_gops == 0)
1660 return;
1661
1662 gnttab_batch_copy(netbk->tx_copy_ops, nr_gops);
1663
1664 xen_netbk_tx_submit(netbk);
1665}
1666
1667static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
1668 u8 status)
1669{
1670 struct xenvif *vif;
1671 struct pending_tx_info *pending_tx_info;
1672 pending_ring_idx_t head;
1673 u16 peek; /* peek into next tx request */
1674
1675 BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));
1676
1677 /* Already complete? */
1678 if (netbk->mmap_pages[pending_idx] == NULL)
1679 return;
1680
1681 pending_tx_info = &netbk->pending_tx_info[pending_idx];
1682
1683 vif = pending_tx_info->vif;
1684 head = pending_tx_info->head;
1685
1686 BUG_ON(!pending_tx_is_head(netbk, head));
1687 BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);
1688
1689 do {
1690 pending_ring_idx_t index;
1691 pending_ring_idx_t idx = pending_index(head);
1692 u16 info_idx = netbk->pending_ring[idx];
1693
1694 pending_tx_info = &netbk->pending_tx_info[info_idx];
1695 make_tx_response(vif, &pending_tx_info->req, status);
1696
1697 /* Setting any number other than
1698 * INVALID_PENDING_RING_IDX indicates this slot is
1699 * starting a new packet / ending a previous packet.
1700 */
1701 pending_tx_info->head = 0;
1702
1703 index = pending_index(netbk->pending_prod++);
1704 netbk->pending_ring[index] = netbk->pending_ring[info_idx];
1705
1706 xenvif_put(vif);
1707
1708 peek = netbk->pending_ring[pending_index(++head)];
1709
1710 } while (!pending_tx_is_head(netbk, peek));
1711
1712 netbk->mmap_pages[pending_idx]->mapping = 0;
1713 put_page(netbk->mmap_pages[pending_idx]);
1714 netbk->mmap_pages[pending_idx] = NULL;
1715}
1716
1717
1718static void make_tx_response(struct xenvif *vif,
1719 struct xen_netif_tx_request *txp,
1720 s8 st)
1721{
1722 RING_IDX i = vif->tx.rsp_prod_pvt;
1723 struct xen_netif_tx_response *resp;
1724 int notify;
1725
1726 resp = RING_GET_RESPONSE(&vif->tx, i);
1727 resp->id = txp->id;
1728 resp->status = st;
1729
1730 if (txp->flags & XEN_NETTXF_extra_info)
1731 RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1732
1733 vif->tx.rsp_prod_pvt = ++i;
1734 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
1735 if (notify)
1736 notify_remote_via_irq(vif->irq);
1737}
1738
1739static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
1740 u16 id,
1741 s8 st,
1742 u16 offset,
1743 u16 size,
1744 u16 flags)
1745{
1746 RING_IDX i = vif->rx.rsp_prod_pvt;
1747 struct xen_netif_rx_response *resp;
1748
1749 resp = RING_GET_RESPONSE(&vif->rx, i);
1750 resp->offset = offset;
1751 resp->flags = flags;
1752 resp->id = id;
1753 resp->status = (s16)size;
1754 if (st < 0)
1755 resp->status = (s16)st;
1756
1757 vif->rx.rsp_prod_pvt = ++i;
1758
1759 return resp;
1760}
1761
1762static inline int rx_work_todo(struct xen_netbk *netbk)
1763{
1764 return !skb_queue_empty(&netbk->rx_queue);
1765}
1766
1767static inline int tx_work_todo(struct xen_netbk *netbk)
1768{
1769
1770 if ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
1771 < MAX_PENDING_REQS) &&
1772 !list_empty(&netbk->net_schedule_list))
1773 return 1;
1774
1775 return 0;
1776}
1777
1778static int xen_netbk_kthread(void *data)
1779{
1780 struct xen_netbk *netbk = data;
1781 while (!kthread_should_stop()) {
1782 wait_event_interruptible(netbk->wq,
1783 rx_work_todo(netbk) ||
1784 tx_work_todo(netbk) ||
1785 kthread_should_stop());
1786 cond_resched();
1787
1788 if (kthread_should_stop())
1789 break;
1790
1791 if (rx_work_todo(netbk))
1792 xen_netbk_rx_action(netbk);
1793
1794 if (tx_work_todo(netbk))
1795 xen_netbk_tx_action(netbk);
1796 }
1797
1798 return 0;
1799}
1800
1801void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
1802{
1803 if (vif->tx.sring)
1804 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1805 vif->tx.sring);
1806 if (vif->rx.sring)
1807 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1808 vif->rx.sring);
1809}
1810
1811int xen_netbk_map_frontend_rings(struct xenvif *vif,
1812 grant_ref_t tx_ring_ref,
1813 grant_ref_t rx_ring_ref)
1814{
1815 void *addr;
1816 struct xen_netif_tx_sring *txs;
1817 struct xen_netif_rx_sring *rxs;
1818
1819 int err = -ENOMEM;
1820
1821 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1822 tx_ring_ref, &addr);
1823 if (err)
1824 goto err;
1825
1826 txs = (struct xen_netif_tx_sring *)addr;
1827 BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
1828
1829 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1830 rx_ring_ref, &addr);
1831 if (err)
1832 goto err;
1833
1834 rxs = (struct xen_netif_rx_sring *)addr;
1835 BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
1836
1837 vif->rx_req_cons_peek = 0;
1838
1839 return 0;
1840
1841err:
1842 xen_netbk_unmap_frontend_rings(vif);
1843 return err;
1844}
1845
1846static int __init netback_init(void)
1847{
1848 int i;
1849 int rc = 0;
1850 int group;
1851
1852 if (!xen_domain())
1853 return -ENODEV;
1854
1855 if (max_skb_slots < XEN_NETIF_NR_SLOTS_MIN) {
1856 printk(KERN_INFO
1857 "xen-netback: max_skb_slots too small (%d), bump it to XEN_NETIF_NR_SLOTS_MIN (%d)\n",
1858 max_skb_slots, XEN_NETIF_NR_SLOTS_MIN);
1859 max_skb_slots = XEN_NETIF_NR_SLOTS_MIN;
1860 }
1861
1862 xen_netbk_group_nr = num_online_cpus();
1863 xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
1864 if (!xen_netbk)
1865 return -ENOMEM;
1866
1867 for (group = 0; group < xen_netbk_group_nr; group++) {
1868 struct xen_netbk *netbk = &xen_netbk[group];
1869 skb_queue_head_init(&netbk->rx_queue);
1870 skb_queue_head_init(&netbk->tx_queue);
1871
1872 init_timer(&netbk->net_timer);
1873 netbk->net_timer.data = (unsigned long)netbk;
1874 netbk->net_timer.function = xen_netbk_alarm;
1875
1876 netbk->pending_cons = 0;
1877 netbk->pending_prod = MAX_PENDING_REQS;
1878 for (i = 0; i < MAX_PENDING_REQS; i++)
1879 netbk->pending_ring[i] = i;
1880
1881 init_waitqueue_head(&netbk->wq);
1882 netbk->task = kthread_create(xen_netbk_kthread,
1883 (void *)netbk,
1884 "netback/%u", group);
1885
1886 if (IS_ERR(netbk->task)) {
1887 printk(KERN_ALERT "kthread_create() fails at netback\n");
1888 del_timer(&netbk->net_timer);
1889 rc = PTR_ERR(netbk->task);
1890 goto failed_init;
1891 }
1892
1893 kthread_bind(netbk->task, group);
1894
1895 INIT_LIST_HEAD(&netbk->net_schedule_list);
1896
1897 spin_lock_init(&netbk->net_schedule_list_lock);
1898
1899 atomic_set(&netbk->netfront_count, 0);
1900
1901 wake_up_process(netbk->task);
1902 }
1903
1904 rc = xenvif_xenbus_init();
1905 if (rc)
1906 goto failed_init;
1907
1908 return 0;
1909
1910failed_init:
1911 while (--group >= 0) {
1912 struct xen_netbk *netbk = &xen_netbk[group];
1913 for (i = 0; i < MAX_PENDING_REQS; i++) {
1914 if (netbk->mmap_pages[i])
1915 __free_page(netbk->mmap_pages[i]);
1916 }
1917 del_timer(&netbk->net_timer);
1918 kthread_stop(netbk->task);
1919 }
1920 vfree(xen_netbk);
1921 return rc;
1922
1923}
1924
1925module_init(netback_init);
1926
1927MODULE_LICENSE("Dual BSD/GPL");
1928MODULE_ALIAS("xen-backend:vif");