/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
/* Provide an option to disable split event channels at load time as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = 1;
module_param(separate_tx_rx_irq, bool, 0644);

/* The time that packets can stay on the guest Rx internal queue
 * before they are dropped.
 */
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);

/* The length of time before the frontend is considered unresponsive
 * because it isn't providing Rx slots.
 */
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);

unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");

/*
 * This is the maximum number of slots an skb can have. If a guest sends
 * an skb which exceeds this limit it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);

/* The amount to copy out of the first guest Tx slot into the skb's
 * linear area. If the first slot has more data, it will be mapped
 * and put into the first frag.
 *
 * This is sized to avoid pulling headers from the frags for most
 * TCP/IP packets.
 */
#define XEN_NETBACK_TX_COPY_LEN 128

static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status);

static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     s8 st);

static inline int tx_work_todo(struct xenvif_queue *queue);

static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
						      u16 id,
						      s8 st,
						      u16 offset,
						      u16 size,
						      u16 flags);

static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
				       u16 idx)
{
	return page_to_pfn(queue->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}

#define callback_param(vif, pending_idx) \
	(vif->pending_tx_info[pending_idx].callback_struct)

/* Find the containing VIF's structure from a pointer in pending_tx_info array
 */
static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
{
	u16 pending_idx = ubuf->desc;
	struct pending_tx_info *temp =
		container_of(ubuf, struct pending_tx_info, callback_struct);
	return container_of(temp - pending_idx,
			    struct xenvif_queue,
			    pending_tx_info[0]);
}

static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	frag->page_offset = pending_idx;
}

static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}

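/* Check whether the frontend has posted at least @needed Rx request
 * slots on the shared ring, re-arming the ring's event index so we get
 * notified when more requests arrive.
 */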
bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
{
	RING_IDX prod, cons;

	do {
		prod = queue->rx.sring->req_prod;
		cons = queue->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		queue->rx.sring->req_event = prod + 1;

		/* Make sure event is visible before we check prod
		 * again.
		 */
		mb();
	} while (queue->rx.sring->req_prod != prod);

	return false;
}

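/* Helpers for the per-queue internal guest Rx queue: skbs are held here
 * until ring slots become available, the netdev transmit queue is
 * stopped once rx_queue_max is exceeded, and packets that sit on the
 * queue past their expiry time are dropped.
 */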
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->rx_queue.lock, flags);

	__skb_queue_tail(&queue->rx_queue, skb);

	queue->rx_queue_len += skb->len;
	if (queue->rx_queue_len > queue->rx_queue_max)
		netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));

	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}

static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	spin_lock_irq(&queue->rx_queue.lock);

	skb = __skb_dequeue(&queue->rx_queue);
	if (skb)
		queue->rx_queue_len -= skb->len;

	spin_unlock_irq(&queue->rx_queue.lock);

	return skb;
}

static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
{
	spin_lock_irq(&queue->rx_queue.lock);

	if (queue->rx_queue_len < queue->rx_queue_max)
		netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));

	spin_unlock_irq(&queue->rx_queue.lock);
}

static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
		kfree_skb(skb);
}

static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	for (;;) {
		skb = skb_peek(&queue->rx_queue);
		if (!skb)
			break;
		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
			break;
		xenvif_rx_dequeue(queue);
		kfree_skb(skb);
	}
}

struct netrx_pending_operations {
	unsigned copy_prod, copy_cons;
	unsigned meta_prod, meta_cons;
	struct gnttab_copy *copy;
	struct xenvif_rx_meta *meta;
	int copy_off;
	grant_ref_t copy_gref;
};

static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
						 struct netrx_pending_operations *npo)
{
	struct xenvif_rx_meta *meta;
	struct xen_netif_rx_request *req;

	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req->id;

	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	return meta;
}

/*
 * Set up the grant operations for this fragment. If it's a flipping
 * interface, we also set up the unmap request from here.
 */
static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
				 struct netrx_pending_operations *npo,
				 struct page *page, unsigned long size,
				 unsigned long offset, int *head)
{
	struct gnttab_copy *copy_gop;
	struct xenvif_rx_meta *meta;
	unsigned long bytes;
	int gso_type = XEN_NETIF_GSO_TYPE_NONE;

	/* Data must not cross a page boundary. */
	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));

	meta = npo->meta + npo->meta_prod - 1;

	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (size > 0) {
		struct xen_page_foreign *foreign;

		BUG_ON(offset >= PAGE_SIZE);
		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

		if (npo->copy_off == MAX_BUFFER_OFFSET)
			meta = get_next_rx_buffer(queue, npo);

		bytes = PAGE_SIZE - offset;
		if (bytes > size)
			bytes = size;

		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - npo->copy_off;

		copy_gop = npo->copy + npo->copy_prod++;
		copy_gop->flags = GNTCOPY_dest_gref;
		copy_gop->len = bytes;

		foreign = xen_page_foreign(page);
		if (foreign) {
			copy_gop->source.domid = foreign->domid;
			copy_gop->source.u.ref = foreign->gref;
			copy_gop->flags |= GNTCOPY_source_gref;
		} else {
			copy_gop->source.domid = DOMID_SELF;
			copy_gop->source.u.gmfn =
				virt_to_mfn(page_address(page));
		}
		copy_gop->source.offset = offset;

		copy_gop->dest.domid = queue->vif->domid;
		copy_gop->dest.offset = npo->copy_off;
		copy_gop->dest.u.ref = npo->copy_gref;

		npo->copy_off += bytes;
		meta->size += bytes;

		offset += bytes;
		size -= bytes;

		/* Next frame */
		if (offset == PAGE_SIZE && size) {
			BUG_ON(!PageCompound(page));
			page++;
			offset = 0;
		}

		/* Leave a gap for the GSO descriptor. */
		if (skb_is_gso(skb)) {
			if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
				gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
			else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
				gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
		}

		if (*head && ((1 << gso_type) & queue->vif->gso_mask))
			queue->rx.req_cons++;

		*head = 0; /* There must be something in this buffer now. */
	}
}

/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int xenvif_gop_skb(struct sk_buff *skb,
			  struct netrx_pending_operations *npo,
			  struct xenvif_queue *queue)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct xenvif_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;
	int gso_type;

	old_meta_prod = npo->meta_prod;

	gso_type = XEN_NETIF_GSO_TYPE_NONE;
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
	}

	/* Set up a GSO prefix descriptor, if necessary */
	if ((1 << gso_type) & vif->gso_prefix_mask) {
		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_type = gso_type;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if ((1 << gso_type) & vif->gso_mask) {
		meta->gso_type = gso_type;
		meta->gso_size = skb_shinfo(skb)->gso_size;
	} else {
		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
		meta->gso_size = 0;
	}

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		xenvif_gop_frag_copy(queue, skb, npo,
				     virt_to_page(data), len, offset, &head);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		xenvif_gop_frag_copy(queue, skb, npo,
				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
				     skb_shinfo(skb)->frags[i].page_offset,
				     &head);
	}

	return npo->meta_prod - old_meta_prod;
}

/*
 * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done. Check that
 * they didn't give any errors and advance over them.
 */
static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
			    struct netrx_pending_operations *npo)
{
	struct gnttab_copy *copy_op;
	int status = XEN_NETIF_RSP_OKAY;
	int i;

	for (i = 0; i < nr_meta_slots; i++) {
		copy_op = npo->copy + npo->copy_cons++;
		if (copy_op->status != GNTST_okay) {
			netdev_dbg(vif->dev,
				   "Bad status %d from copy to DOM%d.\n",
				   copy_op->status, vif->domid);
			status = XEN_NETIF_RSP_ERROR;
		}
	}

	return status;
}

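/* Generate the Rx responses for the fragment meta slots that follow the
 * first slot of a packet, chaining them with XEN_NETRXF_more_data on
 * all but the final fragment.
 */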
static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
				      struct xenvif_rx_meta *meta,
				      int nr_meta_slots)
{
	int i;
	unsigned long offset;

	/* No fragments used */
	if (nr_meta_slots <= 1)
		return;

	nr_meta_slots--;

	for (i = 0; i < nr_meta_slots; i++) {
		int flags;
		if (i == nr_meta_slots - 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		offset = 0;
		make_rx_response(queue, meta[i].id, status, offset,
				 meta[i].size, flags);
	}
}

void xenvif_kick_thread(struct xenvif_queue *queue)
{
	wake_up(&queue->wq);
}

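/* Transmit queued skbs to the frontend: while Rx ring slots are
 * available, build grant copy operations for each dequeued packet,
 * issue them in one batch with gnttab_batch_copy(), then write the Rx
 * responses (including any GSO extra info) and notify the frontend if
 * required.
 */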
static void xenvif_rx_action(struct xenvif_queue *queue)
{
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	unsigned long offset;
	bool need_to_notify = false;

	struct netrx_pending_operations npo = {
		.copy = queue->grant_copy_op,
		.meta = queue->meta,
	};

	skb_queue_head_init(&rxq);

	while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
		RING_IDX old_req_cons;
		RING_IDX ring_slots_used;

		queue->last_rx_time = jiffies;

		old_req_cons = queue->rx.req_cons;
		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
		ring_slots_used = queue->rx.req_cons - old_req_cons;

		__skb_queue_tail(&rxq, skb);
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));

	if (!npo.copy_prod)
		goto done;

	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
	gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {

		if ((1 << queue->meta[npo.meta_cons].gso_type) &
		    queue->vif->gso_prefix_mask) {
			resp = RING_GET_RESPONSE(&queue->rx,
						 queue->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = queue->meta[npo.meta_cons].gso_size;
			resp->id = queue->meta[npo.meta_cons].id;
			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;

			npo.meta_cons++;
			XENVIF_RX_CB(skb)->meta_slots_used--;
		}

		queue->stats.tx_bytes += skb->len;
		queue->stats.tx_packets++;

		status = xenvif_check_gop(queue->vif,
					  XENVIF_RX_CB(skb)->meta_slots_used,
					  &npo);

		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
					status, offset,
					queue->meta[npo.meta_cons].size,
					flags);

		if ((1 << queue->meta[npo.meta_cons].gso_type) &
		    queue->vif->gso_mask) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&queue->rx,
						  queue->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
			gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		xenvif_add_frag_responses(queue, status,
					  queue->meta + npo.meta_cons + 1,
					  XENVIF_RX_CB(skb)->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);

		need_to_notify |= !!ret;

		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
		dev_kfree_skb(skb);
	}

done:
	if (need_to_notify)
		notify_remote_via_irq(queue->rx_irq);
}

void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&queue->napi);
}

static void tx_add_credit(struct xenvif_queue *queue)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
	max_burst = min(max_burst, 131072UL);
	max_burst = max(max_burst, queue->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = queue->remaining_credit + queue->credit_bytes;
	if (max_credit < queue->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	queue->remaining_credit = min(max_credit, max_burst);
}

static void tx_credit_callback(unsigned long data)
{
	struct xenvif_queue *queue = (struct xenvif_queue *)data;
	tx_add_credit(queue);
	xenvif_napi_schedule_or_enable_events(queue);
}

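/* Send an error response for each outstanding Tx request up to @end and
 * notify the frontend, consuming the offending slots on the Tx ring.
 */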
static void xenvif_tx_err(struct xenvif_queue *queue,
			  struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = queue->tx.req_cons;
	unsigned long flags;

	do {
		int notify;

		spin_lock_irqsave(&queue->response_lock, flags);
		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
		spin_unlock_irqrestore(&queue->response_lock, flags);
		if (notify)
			notify_remote_via_irq(queue->tx_irq);

		if (cons == end)
			break;
		txp = RING_GET_REQUEST(&queue->tx, cons++);
	} while (1);
	queue->tx.req_cons = cons;
}

static void xenvif_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	vif->disabled = true;
	/* Disable the vif from queue 0's kthread */
	if (vif->queues)
		xenvif_kick_thread(&vif->queues[0]);
}

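/* Walk the chain of Tx requests that follow @first, copying them into
 * @txp. Returns the number of extra slots consumed, or a negative errno
 * if the frontend violated the protocol, in which case the vif is
 * disabled or the packet is dropped via xenvif_tx_err().
 */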
static int xenvif_count_requests(struct xenvif_queue *queue,
				 struct xen_netif_tx_request *first,
				 struct xen_netif_tx_request *txp,
				 int work_to_do)
{
	RING_IDX cons = queue->tx.req_cons;
	int slots = 0;
	int drop_err = 0;
	int more_data;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		struct xen_netif_tx_request dropped_tx = { 0 };

		if (slots >= work_to_do) {
			netdev_err(queue->vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			xenvif_fatal_tx_err(queue->vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and
		 * considered malicious.
		 */
		if (unlikely(slots >= fatal_skb_slots)) {
			netdev_err(queue->vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, fatal_skb_slots);
			xenvif_fatal_tx_err(queue->vif);
			return -E2BIG;
		}

		/* Xen network protocol had an implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
		 * the historical MAX_SKB_FRAGS value 18 to honor the
		 * same behavior as before. Any packet using more than
		 * 18 slots but less than fatal_skb_slots slots is
		 * dropped.
		 */
		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
			drop_err = -E2BIG;
		}

		if (drop_err)
			txp = &dropped_tx;

		memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
		       sizeof(*txp));

		/* If the guest submitted a frame >= 64 KiB then
		 * first->size overflowed and following slots will
		 * appear to be larger than the frame.
		 *
		 * This cannot be a fatal error as there are buggy
		 * frontends that do this.
		 *
		 * Consume all slots and drop the packet.
		 */
		if (!drop_err && txp->size > first->size) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Invalid tx request, slot size %u > remaining size %u\n",
					   txp->size, first->size);
			drop_err = -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
				   txp->offset, txp->size);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		more_data = txp->flags & XEN_NETTXF_more_data;

		if (!drop_err)
			txp++;

	} while (more_data);

	if (drop_err) {
		xenvif_tx_err(queue, first, cons + slots);
		return drop_err;
	}

	return slots;
}

struct xenvif_tx_cb {
	u16 pending_idx;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)

static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
					   u16 pending_idx,
					   struct xen_netif_tx_request *txp,
					   struct gnttab_map_grant_ref *mop)
{
	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
			  GNTMAP_host_map | GNTMAP_readonly,
			  txp->gref, queue->vif->domid);

	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
	       sizeof(*txp));
}

static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
{
	struct sk_buff *skb =
		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
			  GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(skb == NULL))
		return NULL;

	/* Packets passed to netif_rx() must have some headroom. */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

	/* Initialize it here to avoid later surprises */
	skb_shinfo(skb)->destructor_arg = NULL;

	return skb;
}

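/* Prepare grant map operations for every slot of an skb beyond the
 * linear area. Slots that exceed MAX_SKB_FRAGS spill into a freshly
 * allocated frag_list skb. Returns the next unused map op, or NULL if
 * the frag_list skb could not be allocated.
 */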
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
							 struct sk_buff *skb,
							 struct xen_netif_tx_request *txp,
							 struct gnttab_map_grant_ref *gop)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	int start;
	pending_ring_idx_t index;
	unsigned int nr_slots, frag_overflow = 0;

	/* At this point shinfo->nr_frags is in fact the number of
	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
	 */
	if (shinfo->nr_frags > MAX_SKB_FRAGS) {
		frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
		BUG_ON(frag_overflow > MAX_SKB_FRAGS);
		shinfo->nr_frags = MAX_SKB_FRAGS;
	}
	nr_slots = shinfo->nr_frags;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
	     shinfo->nr_frags++, txp++, gop++) {
		index = pending_index(queue->pending_cons++);
		pending_idx = queue->pending_ring[index];
		xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
	}

	if (frag_overflow) {
		struct sk_buff *nskb = xenvif_alloc_skb(0);
		if (unlikely(nskb == NULL)) {
			if (net_ratelimit())
				netdev_err(queue->vif->dev,
					   "Can't allocate the frag_list skb.\n");
			return NULL;
		}

		shinfo = skb_shinfo(nskb);
		frags = shinfo->frags;

		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
		     shinfo->nr_frags++, txp++, gop++) {
			index = pending_index(queue->pending_cons++);
			pending_idx = queue->pending_ring[index];
			xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
			frag_set_pending_idx(&frags[shinfo->nr_frags],
					     pending_idx);
		}

		skb_shinfo(skb)->frag_list = nskb;
	}

	return gop;
}

static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
					   u16 pending_idx,
					   grant_handle_t handle)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] !=
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to overwrite active handle! pending_idx: %x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = handle;
}

static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
					     u16 pending_idx)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] ==
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to unmap invalid handle! pending_idx: %x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}

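/* Check the results of the header grant copy and the frag grant map
 * operations for an skb, including its frag_list skb if present.
 * Successful maps have their grant handles recorded; on error the
 * already-mapped frags are unmapped and error responses are sent.
 * Returns 0 on success or a non-zero grant status on failure.
 */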
static int xenvif_tx_check_gop(struct xenvif_queue *queue,
			       struct sk_buff *skb,
			       struct gnttab_map_grant_ref **gopp_map,
			       struct gnttab_copy **gopp_copy)
{
	struct gnttab_map_grant_ref *gop_map = *gopp_map;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	/* This always points to the shinfo of the skb being checked, which
	 * could be either the first or the one on the frag_list
	 */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	/* If this is non-NULL, we are currently checking the frag_list skb, and
	 * this points to the shinfo of the first one
	 */
	struct skb_shared_info *first_shinfo = NULL;
	int nr_frags = shinfo->nr_frags;
	const bool sharedslot = nr_frags &&
				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
	int i, err;

	/* Check status of header. */
	err = (*gopp_copy)->status;
	if (unlikely(err)) {
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
				   (*gopp_copy)->status,
				   pending_idx,
				   (*gopp_copy)->source.u.ref);
		/* The first frag might still have this slot mapped */
		if (!sharedslot)
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_ERROR);
	}
	(*gopp_copy)++;

check_frags:
	for (i = 0; i < nr_frags; i++, gop_map++) {
		int j, newerr;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

		/* Check error status: if okay then remember grant handle. */
		newerr = gop_map->status;

		if (likely(!newerr)) {
			xenvif_grant_handle_set(queue,
						pending_idx,
						gop_map->handle);
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err)) {
				xenvif_idx_unmap(queue, pending_idx);
				/* If the mapping of the first frag was OK, but
				 * the header's copy failed, and they are
				 * sharing a slot, send an error
				 */
				if (i == 0 && sharedslot)
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_ERROR);
				else
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_OKAY);
			}
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
				   i,
				   gop_map->status,
				   pending_idx,
				   gop_map->ref);

		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: if the header hasn't shared a slot with the
		 * first frag, release it as well.
		 */
		if (!sharedslot)
			xenvif_idx_release(queue,
					   XENVIF_TX_CB(skb)->pending_idx,
					   XEN_NETIF_RSP_OKAY);

		/* Invalidate preceding fragments of this skb. */
		for (j = 0; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xenvif_idx_unmap(queue, pending_idx);
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		/* And if we found the error while checking the frag_list, unmap
		 * the first skb's frags
		 */
		if (first_shinfo) {
			for (j = 0; j < first_shinfo->nr_frags; j++) {
				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
				xenvif_idx_unmap(queue, pending_idx);
				xenvif_idx_release(queue, pending_idx,
						   XEN_NETIF_RSP_OKAY);
			}
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	if (skb_has_frag_list(skb) && !first_shinfo) {
		first_shinfo = skb_shinfo(skb);
		shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
		nr_frags = shinfo->nr_frags;

		goto check_frags;
	}

	*gopp_map = gop_map;
	return err;
}

static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;
	u16 prev_pending_idx = INVALID_PENDING_IDX;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		/* If this is not the first frag, chain it to the previous */
		if (prev_pending_idx == INVALID_PENDING_IDX)
			skb_shinfo(skb)->destructor_arg =
				&callback_param(queue, pending_idx);
		else
			callback_param(queue, prev_pending_idx).ctx =
				&callback_param(queue, pending_idx);

		callback_param(queue, pending_idx).ctx = NULL;
		prev_pending_idx = pending_idx;

		txp = &queue->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(queue, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset network stack's put_page */
		get_page(queue->mmap_pages[pending_idx]);
	}
}

static int xenvif_get_extras(struct xenvif_queue *queue,
			     struct xen_netif_extra_info *extras,
			     int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = queue->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(queue->vif->dev, "Missing extra info\n");
			xenvif_fatal_tx_err(queue->vif);
			return -EBADR;
		}

		memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
		       sizeof(extra));
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			queue->tx.req_cons = ++cons;
			netdev_err(queue->vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
		queue->tx.req_cons = ++cons;
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}

1100
7376419a
WL
1101static int xenvif_set_skb_gso(struct xenvif *vif,
1102 struct sk_buff *skb,
1103 struct xen_netif_extra_info *gso)
f942dc25
IC
1104{
1105 if (!gso->u.gso.size) {
48856286 1106 netdev_err(vif->dev, "GSO size must not be zero.\n");
7376419a 1107 xenvif_fatal_tx_err(vif);
f942dc25
IC
1108 return -EINVAL;
1109 }
1110
a9468587
PD
1111 switch (gso->u.gso.type) {
1112 case XEN_NETIF_GSO_TYPE_TCPV4:
1113 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1114 break;
1115 case XEN_NETIF_GSO_TYPE_TCPV6:
1116 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1117 break;
1118 default:
48856286 1119 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
7376419a 1120 xenvif_fatal_tx_err(vif);
f942dc25
IC
1121 return -EINVAL;
1122 }
1123
1124 skb_shinfo(skb)->gso_size = gso->u.gso.size;
b89587a7 1125 /* gso_segs will be calculated later */
f942dc25
IC
1126
1127 return 0;
1128}
1129
static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		queue->stats.rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}

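/* Credit-based rate limiting: returns true if the queue does not have
 * enough remaining credit to send @size bytes right now, arming the
 * credit timer so transmission resumes once credit is replenished.
 */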
static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{
	u64 now = get_jiffies_64();
	u64 next_credit = queue->credit_window_start +
		msecs_to_jiffies(queue->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&queue->credit_timeout))
		return true;

	/* Passed the point where we can replenish credit? */
	if (time_after_eq64(now, next_credit)) {
		queue->credit_window_start = now;
		tx_add_credit(queue);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > queue->remaining_credit) {
		queue->credit_timeout.data =
			(unsigned long)queue;
		queue->credit_timeout.function =
			tx_credit_callback;
		mod_timer(&queue->credit_timeout,
			  next_credit);
		queue->credit_window_start = next_credit;

		return true;
	}

	return false;
}

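/* Consume Tx requests from the frontend ring (up to @budget packets),
 * validate them, and build the grant copy operations for the packet
 * headers and the grant map operations for the remaining slots. The
 * number of copy and map operations produced is returned through
 * @copy_ops and @map_ops.
 */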
static void xenvif_tx_build_gops(struct xenvif_queue *queue,
				 int budget,
				 unsigned *copy_ops,
				 unsigned *map_ops)
{
	struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
	struct sk_buff *skb;
	int ret;

	while (skb_queue_len(&queue->tx_queue) < budget) {
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		if (queue->tx.sring->req_prod - queue->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(queue->vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   queue->tx.sring->req_prod, queue->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
		if (!work_to_do)
			break;

		idx = queue->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));

		/* Credit-based scheduling. */
		if (txreq.size > queue->remaining_credit &&
		    tx_credit_exceeded(queue, txreq.size))
			break;

		queue->remaining_credit -= txreq.size;

		work_to_do--;
		queue->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xenvif_get_extras(queue, extras,
						       work_to_do);
			idx = queue->tx.req_cons;
			if (unlikely(work_to_do < 0))
				break;
		}

		ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
		if (unlikely(ret < 0))
			break;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(queue->vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			xenvif_tx_err(queue, &txreq, idx);
			break;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
			netdev_err(queue->vif->dev,
				   "txreq.offset: %x, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (txreq.offset&~PAGE_MASK) + txreq.size);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		index = pending_index(queue->pending_cons);
		pending_idx = queue->pending_ring[index];

		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
			XEN_NETBACK_TX_COPY_LEN : txreq.size;

		skb = xenvif_alloc_skb(data_len);
		if (unlikely(skb == NULL)) {
			netdev_dbg(queue->vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			xenvif_tx_err(queue, &txreq, idx);
			break;
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
				/* Failure in xenvif_set_skb_gso is fatal. */
				kfree_skb(skb);
				break;
			}
		}

		XENVIF_TX_CB(skb)->pending_idx = pending_idx;

		__skb_put(skb, data_len);
		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;

		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
			virt_to_mfn(skb->data);
		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
		queue->tx_copy_ops[*copy_ops].dest.offset =
			offset_in_page(skb->data);

		queue->tx_copy_ops[*copy_ops].len = data_len;
		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;

		(*copy_ops)++;

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size) {
			skb_shinfo(skb)->nr_frags++;
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
			xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
			gop++;
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
			memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
			       sizeof(txreq));
		}

		queue->pending_cons++;

		request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
		if (request_gop == NULL) {
			kfree_skb(skb);
			xenvif_tx_err(queue, &txreq, idx);
			break;
		}
		gop = request_gop;

		__skb_queue_tail(&queue->tx_queue, skb);

		queue->tx.req_cons = idx;

		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
			break;
	}

	(*map_ops) = gop - queue->tx_map_ops;
	return;
}

/* Consolidate skb with a frag_list into a brand new one with local pages on
 * frags. Returns 0 or -ENOMEM if can't allocate new pages.
 */
static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned int offset = skb_headlen(skb);
	skb_frag_t frags[MAX_SKB_FRAGS];
	int i, f;
	struct ubuf_info *uarg;
	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

	queue->stats.tx_zerocopy_sent += 2;
	queue->stats.tx_frag_overflow++;

	xenvif_fill_frags(queue, nskb);
	/* Subtract frags size, we will correct it later */
	skb->truesize -= skb->data_len;
	skb->len += nskb->len;
	skb->data_len += nskb->len;

	/* create a brand new frags array and coalesce there */
	for (i = 0; offset < skb->len; i++) {
		struct page *page;
		unsigned int len;

		BUG_ON(i >= MAX_SKB_FRAGS);
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			int j;
			skb->truesize += skb->data_len;
			for (j = 0; j < i; j++)
				put_page(frags[j].page.p);
			return -ENOMEM;
		}

		if (offset + PAGE_SIZE < skb->len)
			len = PAGE_SIZE;
		else
			len = skb->len - offset;
		if (skb_copy_bits(skb, offset, page_address(page), len))
			BUG();

		offset += len;
		frags[i].page.p = page;
		frags[i].page_offset = 0;
		skb_frag_size_set(&frags[i], len);
	}

	/* Copied all the bits from the frag list -- free it. */
	skb_frag_list_init(skb);
	xenvif_skb_zerocopy_prepare(queue, nskb);
	kfree_skb(nskb);

	/* Release all the original (foreign) frags. */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		skb_frag_unref(skb, f);
	uarg = skb_shinfo(skb)->destructor_arg;
	/* increase inflight counter to offset decrement in callback */
	atomic_inc(&queue->inflight_packets);
	uarg->callback(uarg, true);
	skb_shinfo(skb)->destructor_arg = NULL;

	/* Fill the skb with the new (local) frags. */
	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
	skb_shinfo(skb)->nr_frags = i;
	skb->truesize += i * PAGE_SIZE;

	return 0;
}

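/* Complete the packets prepared by xenvif_tx_build_gops: check the
 * grant operation results, fill in the frags, fix up checksum and GSO
 * metadata, and hand the skbs to the network stack with
 * netif_receive_skb(). Returns the number of packets submitted.
 */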
static int xenvif_tx_submit(struct xenvif_queue *queue)
{
	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
		txp = &queue->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
			/* If there was an error, xenvif_tx_check_gop is
			 * expected to release all the frags which were mapped,
			 * so kfree_skb shouldn't do it again
			 */
			skb_shinfo(skb)->nr_frags = 0;
			if (skb_has_frag_list(skb)) {
				struct sk_buff *nskb =
						skb_shinfo(skb)->frag_list;
				skb_shinfo(nskb)->nr_frags = 0;
			}
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		callback_param(queue, pending_idx).ctx = NULL;
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(queue, skb);

		if (unlikely(skb_has_frag_list(skb))) {
			if (xenvif_handle_frag_list(queue, skb)) {
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Not enough memory to consolidate frag_list!\n");
				xenvif_skb_zerocopy_prepare(queue, skb);
				kfree_skb(skb);
				continue;
			}
		}

		skb->dev = queue->vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue, skb)) {
			netdev_dbg(queue->vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			/* We have to set this flag to trigger the callback */
			if (skb_shinfo(skb)->destructor_arg)
				xenvif_skb_zerocopy_prepare(queue, skb);
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss = skb_shinfo(skb)->gso_size;
			int hdrlen = skb_transport_header(skb) -
				skb_mac_header(skb) +
				tcp_hdrlen(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		queue->stats.rx_bytes += skb->len;
		queue->stats.rx_packets++;

		work_done++;

		/* Set this flag right before netif_receive_skb, otherwise
		 * someone might think this packet already left netback, and
		 * do a skb_copy_ubufs while we are still in control of the
		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
		 */
		if (skb_shinfo(skb)->destructor_arg) {
			xenvif_skb_zerocopy_prepare(queue, skb);
			queue->stats.tx_zerocopy_sent++;
		}

		netif_receive_skb(skb);
	}

	return work_done;
}

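/* Zerocopy completion callback: the network stack has finished with the
 * mapped guest pages, so queue the corresponding pending indexes on the
 * dealloc ring and wake the dealloc task.
 */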
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
	unsigned long flags;
	pending_ring_idx_t index;
	struct xenvif_queue *queue = ubuf_to_queue(ubuf);

	/* This is the only place where we grab this lock, to protect callbacks
	 * from each other.
	 */
	spin_lock_irqsave(&queue->callback_lock, flags);
	do {
		u16 pending_idx = ubuf->desc;
		ubuf = (struct ubuf_info *) ubuf->ctx;
		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
			MAX_PENDING_REQS);
		index = pending_index(queue->dealloc_prod);
		queue->dealloc_ring[index] = pending_idx;
		/* Sync with xenvif_tx_dealloc_action:
		 * insert idx then incr producer.
		 */
		smp_wmb();
		queue->dealloc_prod++;
	} while (ubuf);
	wake_up(&queue->dealloc_wq);
	spin_unlock_irqrestore(&queue->callback_lock, flags);

	if (likely(zerocopy_success))
		queue->stats.tx_zerocopy_success++;
	else
		queue->stats.tx_zerocopy_fail++;
	xenvif_skb_zerocopy_complete(queue);
}

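/* Unmap the grant references queued on the dealloc ring by the
 * zerocopy callback and hand the corresponding Tx responses back to
 * the frontend.
 */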
static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
	struct gnttab_unmap_grant_ref *gop;
	pending_ring_idx_t dc, dp;
	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
	unsigned int i = 0;

	dc = queue->dealloc_cons;
	gop = queue->tx_unmap_ops;

	/* Free up any grants we have finished using */
	do {
		dp = queue->dealloc_prod;

		/* Ensure we see all indices enqueued by all
		 * xenvif_zerocopy_callback().
		 */
		smp_rmb();

		while (dc != dp) {
			BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
			pending_idx =
				queue->dealloc_ring[pending_index(dc++)];

			pending_idx_release[gop-queue->tx_unmap_ops] =
				pending_idx;
			queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
				queue->mmap_pages[pending_idx];
			gnttab_set_unmap_op(gop,
					    idx_to_kaddr(queue, pending_idx),
					    GNTMAP_host_map,
					    queue->grant_tx_handle[pending_idx]);
			xenvif_grant_handle_reset(queue, pending_idx);
			++gop;
		}

	} while (dp != queue->dealloc_prod);

	queue->dealloc_cons = dc;

	if (gop - queue->tx_unmap_ops > 0) {
		int ret;
		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
					NULL,
					queue->pages_to_unmap,
					gop - queue->tx_unmap_ops);
		if (ret) {
			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
				   gop - queue->tx_unmap_ops, ret);
			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
				if (gop[i].status != GNTST_okay)
					netdev_err(queue->vif->dev,
						   " host_addr: %llx handle: %x status: %d\n",
						   gop[i].host_addr,
						   gop[i].handle,
						   gop[i].status);
			}
			BUG();
		}
	}

	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
		xenvif_idx_release(queue, pending_idx_release[i],
				   XEN_NETIF_RSP_OKAY);
}

/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
	unsigned nr_mops, nr_cops = 0;
	int work_done, ret;

	if (unlikely(!tx_work_todo(queue)))
		return 0;

	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);

	if (nr_cops == 0)
		return 0;

	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
	if (nr_mops != 0) {
		ret = gnttab_map_refs(queue->tx_map_ops,
				      NULL,
				      queue->pages_to_map,
				      nr_mops);
		BUG_ON(ret);
	}

	work_done = xenvif_tx_submit(queue);

	return work_done;
}

1654
e9ce7cb6 1655static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
7376419a 1656 u8 status)
f942dc25 1657{
f942dc25 1658 struct pending_tx_info *pending_tx_info;
f53c3fe8 1659 pending_ring_idx_t index;
7fbb9d84 1660 int notify;
f53c3fe8 1661 unsigned long flags;
2810e5b9 1662
e9ce7cb6 1663 pending_tx_info = &queue->pending_tx_info[pending_idx];
7fbb9d84 1664
e9ce7cb6 1665 spin_lock_irqsave(&queue->response_lock, flags);
7fbb9d84 1666
e9ce7cb6 1667 make_tx_response(queue, &pending_tx_info->req, status);
7fbb9d84
DV
1668
1669 /* Release the pending index before pushing the Tx response so
1670 * it's available before a new Tx request is pushed by the
1671 * frontend.
1672 */
1673 index = pending_index(queue->pending_prod++);
e9ce7cb6 1674 queue->pending_ring[index] = pending_idx;
7fbb9d84
DV
1675
1676 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1677
e9ce7cb6 1678 spin_unlock_irqrestore(&queue->response_lock, flags);
7fbb9d84
DV
1679
1680 if (notify)
1681 notify_remote_via_irq(queue->tx_irq);
f942dc25
IC
1682}
1683
2810e5b9 1684
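/* Place a Tx response on the shared ring; a request carrying extra
 * info slots gets an additional NULL response to keep the ring in step.
 */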
e9ce7cb6 1685static void make_tx_response(struct xenvif_queue *queue,
f942dc25
IC
1686 struct xen_netif_tx_request *txp,
1687 s8 st)
1688{
e9ce7cb6 1689 RING_IDX i = queue->tx.rsp_prod_pvt;
f942dc25 1690 struct xen_netif_tx_response *resp;
f942dc25 1691
e9ce7cb6 1692 resp = RING_GET_RESPONSE(&queue->tx, i);
f942dc25
IC
1693 resp->id = txp->id;
1694 resp->status = st;
1695
1696 if (txp->flags & XEN_NETTXF_extra_info)
e9ce7cb6 1697 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
f942dc25 1698
e9ce7cb6 1699 queue->tx.rsp_prod_pvt = ++i;
f942dc25
IC
1700}
1701
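/* Fill in an Rx response slot; a negative status overrides the size
 * reported back to the frontend.
 */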
e9ce7cb6 1702static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
f942dc25
IC
1703 u16 id,
1704 s8 st,
1705 u16 offset,
1706 u16 size,
1707 u16 flags)
1708{
e9ce7cb6 1709 RING_IDX i = queue->rx.rsp_prod_pvt;
f942dc25
IC
1710 struct xen_netif_rx_response *resp;
1711
e9ce7cb6 1712 resp = RING_GET_RESPONSE(&queue->rx, i);
f942dc25
IC
1713 resp->offset = offset;
1714 resp->flags = flags;
1715 resp->id = id;
1716 resp->status = (s16)size;
1717 if (st < 0)
1718 resp->status = (s16)st;
1719
e9ce7cb6 1720 queue->rx.rsp_prod_pvt = ++i;
f942dc25
IC
1721
1722 return resp;
1723}
1724
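/* Synchronously unmap a single granted page identified by its pending
 * index and reset the stored grant handle.
 */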
e9ce7cb6 1725void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
f53c3fe8
ZK
1726{
1727 int ret;
1728 struct gnttab_unmap_grant_ref tx_unmap_op;
1729
1730 gnttab_set_unmap_op(&tx_unmap_op,
e9ce7cb6 1731 idx_to_kaddr(queue, pending_idx),
f53c3fe8 1732 GNTMAP_host_map,
e9ce7cb6
WL
1733 queue->grant_tx_handle[pending_idx]);
1734 xenvif_grant_handle_reset(queue, pending_idx);
f53c3fe8
ZK
1735
1736 ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
e9ce7cb6 1737 &queue->mmap_pages[pending_idx], 1);
7aceb47a 1738 if (ret) {
e9ce7cb6 1739 netdev_err(queue->vif->dev,
7aceb47a
ZK
1740 "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
1741 ret,
1742 pending_idx,
1743 tx_unmap_op.host_addr,
1744 tx_unmap_op.handle,
1745 tx_unmap_op.status);
1746 BUG();
1747 }
f53c3fe8
ZK
1748}
1749
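/* Return nonzero if the frontend has unconsumed Tx requests pending. */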
e9ce7cb6 1750static inline int tx_work_todo(struct xenvif_queue *queue)
f942dc25 1751{
e9ce7cb6 1752 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
f942dc25
IC
1753 return 1;
1754
1755 return 0;
1756}
1757
e9ce7cb6 1758static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
f53c3fe8 1759{
e9ce7cb6 1760 return queue->dealloc_cons != queue->dealloc_prod;
f53c3fe8
ZK
1761}
1762
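/* Tear down the mappings of the frontend's Tx and Rx shared rings. */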
e9ce7cb6 1763void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
f942dc25 1764{
e9ce7cb6
WL
1765 if (queue->tx.sring)
1766 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1767 queue->tx.sring);
1768 if (queue->rx.sring)
1769 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1770 queue->rx.sring);
f942dc25
IC
1771}
1772
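/* Map the frontend's Tx and Rx shared ring grants into the backend and
 * initialise the back ring state; both mappings are unwound on failure.
 */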
e9ce7cb6 1773int xenvif_map_frontend_rings(struct xenvif_queue *queue,
7376419a
WL
1774 grant_ref_t tx_ring_ref,
1775 grant_ref_t rx_ring_ref)
f942dc25 1776{
c9d63699 1777 void *addr;
f942dc25
IC
1778 struct xen_netif_tx_sring *txs;
1779 struct xen_netif_rx_sring *rxs;
1780
1781 int err = -ENOMEM;
1782
e9ce7cb6 1783 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
c9d63699
DV
1784 tx_ring_ref, &addr);
1785 if (err)
f942dc25
IC
1786 goto err;
1787
c9d63699 1788 txs = (struct xen_netif_tx_sring *)addr;
e9ce7cb6 1789 BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);
f942dc25 1790
e9ce7cb6 1791 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
c9d63699
DV
1792 rx_ring_ref, &addr);
1793 if (err)
f942dc25 1794 goto err;
f942dc25 1795
c9d63699 1796 rxs = (struct xen_netif_rx_sring *)addr;
e9ce7cb6 1797 BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
f942dc25
IC
1798
1799 return 0;
1800
1801err:
e9ce7cb6 1802 xenvif_unmap_frontend_rings(queue);
f942dc25
IC
1803 return err;
1804}
1805
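/* Mark this queue as stalled; the carrier is turned off when the first
 * queue on the vif stalls.
 */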
ecf08d2d 1806static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
ca2f09f2 1807{
ecf08d2d
DV
1808 struct xenvif *vif = queue->vif;
1809
1810 queue->stalled = true;
1811
1812 /* At least one queue has stalled? Disable the carrier. */
1813 spin_lock(&vif->lock);
1814 if (vif->stalled_queues++ == 0) {
1815 netdev_info(vif->dev, "Guest Rx stalled");
1816 netif_carrier_off(vif->dev);
1817 }
1818 spin_unlock(&vif->lock);
ca2f09f2
PD
1819}
1820
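/* Mark this queue as ready again; the carrier comes back up once no
 * queues on the vif remain stalled.
 */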
ecf08d2d 1821static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
f34a4cf9 1822{
ecf08d2d 1823 struct xenvif *vif = queue->vif;
f34a4cf9 1824
ecf08d2d
DV
1825 queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
1826 queue->stalled = false;
f34a4cf9 1827
ecf08d2d
DV
1828 /* All queues are ready? Enable the carrier. */
1829 spin_lock(&vif->lock);
1830 if (--vif->stalled_queues == 0) {
1831 netdev_info(vif->dev, "Guest Rx ready");
1832 netif_carrier_on(vif->dev);
1833 }
1834 spin_unlock(&vif->lock);
1835}
f34a4cf9 1836
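/* A queue is considered stalled if the frontend has provided too few
 * Rx slots for longer than the configured stall timeout.
 */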
ecf08d2d
DV
1837static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
1838{
1839 RING_IDX prod, cons;
1840
1841 prod = queue->rx.sring->req_prod;
1842 cons = queue->rx.req_cons;
1843
1844 return !queue->stalled
1845 && prod - cons < XEN_NETBK_RX_SLOTS_MAX
1846 && time_after(jiffies,
26c0e102 1847 queue->last_rx_time + queue->vif->stall_timeout);
ecf08d2d
DV
1848}
1849
1850static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
1851{
1852 RING_IDX prod, cons;
1853
1854 prod = queue->rx.sring->req_prod;
1855 cons = queue->rx.req_cons;
1856
1857 return queue->stalled
1858 && prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
1859}
1860
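/* Condition checked by the guest Rx kthread to decide whether there is
 * anything to do: queued skbs with ring space available, a stall state
 * change, a stop request or a disabled vif.
 */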
f48da8b1 1861static bool xenvif_have_rx_work(struct xenvif_queue *queue)
ca2f09f2 1862{
f48da8b1
DV
1863 return (!skb_queue_empty(&queue->rx_queue)
1864 && xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
26c0e102
DV
1865 || (queue->vif->stall_timeout &&
1866 (xenvif_rx_queue_stalled(queue)
1867 || xenvif_rx_queue_ready(queue)))
f48da8b1
DV
1868 || kthread_should_stop()
1869 || queue->vif->disabled;
ca2f09f2
PD
1870}
1871
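/* How long the Rx kthread may sleep before the packet at the head of
 * the queue expires; infinite if the queue is empty.
 */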
f48da8b1 1872static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
f34a4cf9 1873{
f48da8b1
DV
1874 struct sk_buff *skb;
1875 long timeout;
f34a4cf9 1876
f48da8b1
DV
1877 skb = skb_peek(&queue->rx_queue);
1878 if (!skb)
1879 return MAX_SCHEDULE_TIMEOUT;
f34a4cf9 1880
f48da8b1
DV
1881 timeout = XENVIF_RX_CB(skb)->expires - jiffies;
1882 return timeout < 0 ? 0 : timeout;
1883}
f34a4cf9 1884
f48da8b1
DV
1885/* Wait until the guest Rx thread has work.
1886 *
1887 * The timeout needs to be adjusted based on the current head of the
1888 * queue (and not just the head at the beginning). In particular, if
1889 * the queue is initially empty, an infinite timeout is used and this
1890 * needs to be reduced when a skb is queued.
1891 *
1892 * This cannot be done with wait_event_timeout() because it only
1893 * calculates the timeout once.
1894 */
1895static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
1896{
1897 DEFINE_WAIT(wait);
1898
1899 if (xenvif_have_rx_work(queue))
1900 return;
1901
1902 for (;;) {
1903 long ret;
1904
1905 prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
1906 if (xenvif_have_rx_work(queue))
1907 break;
1908 ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
1909 if (!ret)
1910 break;
f34a4cf9 1911 }
f48da8b1 1912 finish_wait(&queue->wq, &wait);
f34a4cf9
ZK
1913}
1914
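/* Per-queue kthread that pushes queued skbs to the guest and handles
 * rogue-frontend shutdown, stall detection and expiry of queued packets.
 */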
121fa4b7 1915int xenvif_kthread_guest_rx(void *data)
b3f980bd 1916{
e9ce7cb6 1917 struct xenvif_queue *queue = data;
f48da8b1 1918 struct xenvif *vif = queue->vif;
b3f980bd 1919
26c0e102
DV
1920 if (!vif->stall_timeout)
1921 xenvif_queue_carrier_on(queue);
1922
f48da8b1
DV
1923 for (;;) {
1924 xenvif_wait_for_rx_work(queue);
e9d8b2c2 1925
f34a4cf9
ZK
1926 if (kthread_should_stop())
1927 break;
1928
e9d8b2c2
WL
1929 /* This frontend is found to be rogue, disable it in
1930 * kthread context. Currently this is only set when
1931 * netback finds that the frontend has sent a malformed packet,
1932 * but we cannot disable the interface in softirq
e9ce7cb6
WL
1933 * context so we defer it here, if this thread is
1934 * associated with queue 0.
e9d8b2c2 1935 */
f48da8b1
DV
1936 if (unlikely(vif->disabled && queue->id == 0)) {
1937 xenvif_carrier_off(vif);
42b5212f 1938 break;
09350788
ZK
1939 }
1940
e9ce7cb6
WL
1941 if (!skb_queue_empty(&queue->rx_queue))
1942 xenvif_rx_action(queue);
b3f980bd 1943
ecf08d2d
DV
1944 /* If the guest hasn't provided any Rx slots for a
1945 * while it's probably not responsive, drop the
1946 * carrier so packets are dropped earlier.
1947 */
26c0e102
DV
1948 if (vif->stall_timeout) {
1949 if (xenvif_rx_queue_stalled(queue))
1950 xenvif_queue_carrier_off(queue);
1951 else if (xenvif_rx_queue_ready(queue))
1952 xenvif_queue_carrier_on(queue);
1953 }
ecf08d2d 1954
f48da8b1
DV
1955 /* Queued packets may have foreign pages from other
1956 * domains. These cannot be queued indefinitely as
1957 * this would starve guests of grant refs and transmit
1958 * slots.
1959 */
1960 xenvif_rx_queue_drop_expired(queue);
1961
1962 xenvif_rx_queue_maybe_wake(queue);
1963
b3f980bd
WL
1964 cond_resched();
1965 }
1966
ca2f09f2 1967 /* Bin any remaining skbs */
f48da8b1 1968 xenvif_rx_queue_purge(queue);
ca2f09f2 1969
b3f980bd
WL
1970 return 0;
1971}
1972
a64bd934
WL
1973static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
1974{
1975 /* Dealloc thread must remain running until all inflight
1976 * packets complete.
1977 */
1978 return kthread_should_stop() &&
1979 !atomic_read(&queue->inflight_packets);
1980}
1981
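/* Per-queue kthread that unmaps grants for completed zerocopy skbs; it
 * only exits once all inflight packets have been dealt with.
 */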
f53c3fe8
ZK
1982int xenvif_dealloc_kthread(void *data)
1983{
e9ce7cb6 1984 struct xenvif_queue *queue = data;
f53c3fe8 1985
a64bd934 1986 for (;;) {
e9ce7cb6
WL
1987 wait_event_interruptible(queue->dealloc_wq,
1988 tx_dealloc_work_todo(queue) ||
a64bd934
WL
1989 xenvif_dealloc_kthread_should_stop(queue));
1990 if (xenvif_dealloc_kthread_should_stop(queue))
f53c3fe8
ZK
1991 break;
1992
e9ce7cb6 1993 xenvif_tx_dealloc_action(queue);
f53c3fe8
ZK
1994 cond_resched();
1995 }
1996
1997 /* Unmap anything remaining */
e9ce7cb6
WL
1998 if (tx_dealloc_work_todo(queue))
1999 xenvif_tx_dealloc_action(queue);
f53c3fe8
ZK
2000
2001 return 0;
2002}
2003
f942dc25
IC
2004static int __init netback_init(void)
2005{
f942dc25 2006 int rc = 0;
f942dc25 2007
2a14b244 2008 if (!xen_domain())
f942dc25
IC
2009 return -ENODEV;
2010
8d3d53b3
AB
2011 /* Allow as many queues as there are CPUs, by default */
2012 xenvif_max_queues = num_online_cpus();
2013
37641494 2014 if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
383eda32
JP
2015 pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
2016 fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
37641494 2017 fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
2810e5b9
WL
2018 }
2019
f942dc25
IC
2020 rc = xenvif_xenbus_init();
2021 if (rc)
2022 goto failed_init;
2023
f51de243
ZK
2024#ifdef CONFIG_DEBUG_FS
2025 xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
2026 if (IS_ERR_OR_NULL(xen_netback_dbg_root))
2027 pr_warn("Init of debugfs returned %ld!\n",
2028 PTR_ERR(xen_netback_dbg_root));
2029#endif /* CONFIG_DEBUG_FS */
2030
f942dc25
IC
2031 return 0;
2032
2033failed_init:
f942dc25 2034 return rc;
f942dc25
IC
2035}
2036
2037module_init(netback_init);
2038
b103f358
WL
2039static void __exit netback_fini(void)
2040{
f51de243
ZK
2041#ifdef CONFIG_DEBUG_FS
2042 if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
2043 debugfs_remove_recursive(xen_netback_dbg_root);
2044#endif /* CONFIG_DEBUG_FS */
b103f358 2045 xenvif_xenbus_fini();
b103f358
WL
2046}
2047module_exit(netback_fini);
2048
f942dc25 2049MODULE_LICENSE("Dual BSD/GPL");
f984cec6 2050MODULE_ALIAS("xen-backend:vif");