linux: Add skb_frag_t page_offset accessors
[linux-2.6-block.git] / drivers/net/xen-netback/netback.c

/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>
#include <xen/page.h>

#include <asm/xen/hypercall.h>

/* Provide an option to disable split event channels at load time as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);

/* The time that packets can stay on the guest Rx internal queue
 * before they are dropped.
 */
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);

/* The length of time before the frontend is considered unresponsive
 * because it isn't providing Rx slots.
 */
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);

#define MAX_QUEUES_DEFAULT 8
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
                 "Maximum number of queues per virtual interface");

/*
 * This is the maximum number of slots a skb can have. If a guest sends
 * a skb which exceeds this limit it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);

/* The amount to copy out of the first guest Tx slot into the skb's
 * linear area. If the first slot has more data, it will be mapped
 * and put into the first frag.
 *
 * This is sized to avoid pulling headers from the frags for most
 * TCP/IP packets.
 */
#define XEN_NETBACK_TX_COPY_LEN 128

/* This is the maximum number of flows in the hash cache. */
#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");

static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
                               u8 status);

static void make_tx_response(struct xenvif_queue *queue,
                             struct xen_netif_tx_request *txp,
                             unsigned int extra_count,
                             s8 st);
static void push_tx_responses(struct xenvif_queue *queue);

static inline int tx_work_todo(struct xenvif_queue *queue);

static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
                                       u16 idx)
{
        return page_to_pfn(queue->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
                                         u16 idx)
{
        return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}

#define callback_param(vif, pending_idx) \
        (vif->pending_tx_info[pending_idx].callback_struct)
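
/* Note: the zerocopy ubuf_info for pending index N is embedded in
 * queue->pending_tx_info[N], so element 0 of that array (and from it the
 * owning queue) can be recovered by walking back N entries, which is what
 * ubuf_to_queue() below does.
 */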

/* Find the containing VIF's structure from a pointer in pending_tx_info array
 */
static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
{
        u16 pending_idx = ubuf->desc;
        struct pending_tx_info *temp =
                container_of(ubuf, struct pending_tx_info, callback_struct);
        return container_of(temp - pending_idx,
                            struct xenvif_queue,
                            pending_tx_info[0]);
}
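
/* While a guest Tx slot is pending, the frag's page_offset field is reused
 * to stash the pending ring index of the grant backing it; the real page
 * offset is filled in later by xenvif_fill_frags() from the Tx request.
 */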
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
        return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
        frag->page_offset = pending_idx;
}
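
/* Wrap an index onto the pending ring; this relies on MAX_PENDING_REQS
 * being a power of two so that the mask is equivalent to a modulo.
 */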
static inline pending_ring_idx_t pending_index(unsigned i)
{
        return i & (MAX_PENDING_REQS-1);
}

void xenvif_kick_thread(struct xenvif_queue *queue)
{
        wake_up(&queue->wq);
}

void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
        int more_to_do;

        RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);

        if (more_to_do)
                napi_schedule(&queue->napi);
}
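
/* Credit-based Tx scheduling: a queue may send up to queue->credit_bytes
 * per credit window. xenvif_tx_build_gops() consumes remaining_credit and
 * stalls once a request exceeds it; the credit timer then replenishes the
 * allowance via tx_add_credit() below.
 */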
static void tx_add_credit(struct xenvif_queue *queue)
{
        unsigned long max_burst, max_credit;

        /*
         * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
         * Otherwise the interface can seize up due to insufficient credit.
         */
        max_burst = max(131072UL, queue->credit_bytes);

        /* Take care that adding a new chunk of credit doesn't wrap to zero. */
        max_credit = queue->remaining_credit + queue->credit_bytes;
        if (max_credit < queue->remaining_credit)
                max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

        queue->remaining_credit = min(max_credit, max_burst);
        queue->rate_limited = false;
}

void xenvif_tx_credit_callback(struct timer_list *t)
{
        struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);
        tx_add_credit(queue);
        xenvif_napi_schedule_or_enable_events(queue);
}

static void xenvif_tx_err(struct xenvif_queue *queue,
                          struct xen_netif_tx_request *txp,
                          unsigned int extra_count, RING_IDX end)
{
        RING_IDX cons = queue->tx.req_cons;
        unsigned long flags;

        do {
                spin_lock_irqsave(&queue->response_lock, flags);
                make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
                push_tx_responses(queue);
                spin_unlock_irqrestore(&queue->response_lock, flags);
                if (cons == end)
                        break;
                RING_COPY_REQUEST(&queue->tx, cons++, txp);
                extra_count = 0; /* only the first frag can have extras */
        } while (1);
        queue->tx.req_cons = cons;
}

static void xenvif_fatal_tx_err(struct xenvif *vif)
{
        netdev_err(vif->dev, "fatal error; disabling device\n");
        vif->disabled = true;
        /* Disable the vif from queue 0's kthread */
        if (vif->num_queues)
                xenvif_kick_thread(&vif->queues[0]);
}

static int xenvif_count_requests(struct xenvif_queue *queue,
                                 struct xen_netif_tx_request *first,
                                 unsigned int extra_count,
                                 struct xen_netif_tx_request *txp,
                                 int work_to_do)
{
        RING_IDX cons = queue->tx.req_cons;
        int slots = 0;
        int drop_err = 0;
        int more_data;

        if (!(first->flags & XEN_NETTXF_more_data))
                return 0;

        do {
                struct xen_netif_tx_request dropped_tx = { 0 };

                if (slots >= work_to_do) {
                        netdev_err(queue->vif->dev,
                                   "Asked for %d slots but exceeds this limit\n",
                                   work_to_do);
                        xenvif_fatal_tx_err(queue->vif);
                        return -ENODATA;
                }

                /* This guest is really using too many slots and
                 * considered malicious.
                 */
                if (unlikely(slots >= fatal_skb_slots)) {
                        netdev_err(queue->vif->dev,
                                   "Malicious frontend using %d slots, threshold %u\n",
                                   slots, fatal_skb_slots);
                        xenvif_fatal_tx_err(queue->vif);
                        return -E2BIG;
                }

                /* The Xen network protocol had an implicit dependency on
                 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
                 * the historical MAX_SKB_FRAGS value 18 to honor the
                 * same behavior as before. Any packet using more than
                 * 18 slots but less than fatal_skb_slots slots is
                 * dropped.
                 */
                if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
                        if (net_ratelimit())
                                netdev_dbg(queue->vif->dev,
                                           "Too many slots (%d) exceeding limit (%d), dropping packet\n",
                                           slots, XEN_NETBK_LEGACY_SLOTS_MAX);
                        drop_err = -E2BIG;
                }

                if (drop_err)
                        txp = &dropped_tx;

                RING_COPY_REQUEST(&queue->tx, cons + slots, txp);

                /* If the guest submitted a frame >= 64 KiB then
                 * first->size overflowed and following slots will
                 * appear to be larger than the frame.
                 *
                 * This cannot be a fatal error as there are buggy
                 * frontends that do this.
                 *
                 * Consume all slots and drop the packet.
                 */
                if (!drop_err && txp->size > first->size) {
                        if (net_ratelimit())
                                netdev_dbg(queue->vif->dev,
                                           "Invalid tx request, slot size %u > remaining size %u\n",
                                           txp->size, first->size);
                        drop_err = -EIO;
                }

                first->size -= txp->size;
                slots++;

                if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
                        netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
                                   txp->offset, txp->size);
                        xenvif_fatal_tx_err(queue->vif);
                        return -EINVAL;
                }

                more_data = txp->flags & XEN_NETTXF_more_data;

                if (!drop_err)
                        txp++;

        } while (more_data);

        if (drop_err) {
                xenvif_tx_err(queue, first, extra_count, cons + slots);
                return drop_err;
        }

        return slots;
}
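
/* Per-skb netback state is kept in the skb control buffer (skb->cb) and
 * accessed through the XENVIF_TX_CB() macro below.
 */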
struct xenvif_tx_cb {
        u16 pending_idx;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)

static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
                                           u16 pending_idx,
                                           struct xen_netif_tx_request *txp,
                                           unsigned int extra_count,
                                           struct gnttab_map_grant_ref *mop)
{
        queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
        gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
                          GNTMAP_host_map | GNTMAP_readonly,
                          txp->gref, queue->vif->domid);

        memcpy(&queue->pending_tx_info[pending_idx].req, txp,
               sizeof(*txp));
        queue->pending_tx_info[pending_idx].extra_count = extra_count;
}

static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
{
        struct sk_buff *skb =
                alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
                          GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(skb == NULL))
                return NULL;

        /* Packets passed to netif_rx() must have some headroom. */
        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

        /* Initialize it here to avoid later surprises */
        skb_shinfo(skb)->destructor_arg = NULL;

        return skb;
}

static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
                                                        struct sk_buff *skb,
                                                        struct xen_netif_tx_request *txp,
                                                        struct gnttab_map_grant_ref *gop,
                                                        unsigned int frag_overflow,
                                                        struct sk_buff *nskb)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        skb_frag_t *frags = shinfo->frags;
        u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
        int start;
        pending_ring_idx_t index;
        unsigned int nr_slots;

        nr_slots = shinfo->nr_frags;

        /* Skip first skb fragment if it is on same page as header fragment. */
        start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

        for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
             shinfo->nr_frags++, txp++, gop++) {
                index = pending_index(queue->pending_cons++);
                pending_idx = queue->pending_ring[index];
                xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
                frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
        }

        if (frag_overflow) {

                shinfo = skb_shinfo(nskb);
                frags = shinfo->frags;

                for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
                     shinfo->nr_frags++, txp++, gop++) {
                        index = pending_index(queue->pending_cons++);
                        pending_idx = queue->pending_ring[index];
                        xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
                                                gop);
                        frag_set_pending_idx(&frags[shinfo->nr_frags],
                                             pending_idx);
                }

                skb_shinfo(skb)->frag_list = nskb;
        }

        return gop;
}
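
/* Grant handles for mapped Tx slots are tracked in queue->grant_tx_handle[].
 * The set/reset helpers below BUG() on a double set or reset, catching
 * double-map and use-after-unmap bugs in the pending-slot lifecycle.
 */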
static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
                                           u16 pending_idx,
                                           grant_handle_t handle)
{
        if (unlikely(queue->grant_tx_handle[pending_idx] !=
                     NETBACK_INVALID_HANDLE)) {
                netdev_err(queue->vif->dev,
                           "Trying to overwrite active handle! pending_idx: 0x%x\n",
                           pending_idx);
                BUG();
        }
        queue->grant_tx_handle[pending_idx] = handle;
}

static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
                                             u16 pending_idx)
{
        if (unlikely(queue->grant_tx_handle[pending_idx] ==
                     NETBACK_INVALID_HANDLE)) {
                netdev_err(queue->vif->dev,
                           "Trying to unmap invalid handle! pending_idx: 0x%x\n",
                           pending_idx);
                BUG();
        }
        queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}
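
/* Check the grant copy of the header and the grant map of every frag
 * (including the frag_list skb, if any). On error the already mapped
 * frags are unmapped and their slots released here, so the caller only
 * needs to drop the skb. Returns 0 on success, otherwise a nonzero
 * grant status.
 */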
static int xenvif_tx_check_gop(struct xenvif_queue *queue,
                               struct sk_buff *skb,
                               struct gnttab_map_grant_ref **gopp_map,
                               struct gnttab_copy **gopp_copy)
{
        struct gnttab_map_grant_ref *gop_map = *gopp_map;
        u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
        /* This always points to the shinfo of the skb being checked, which
         * could be either the first or the one on the frag_list
         */
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        /* If this is non-NULL, we are currently checking the frag_list skb, and
         * this points to the shinfo of the first one
         */
        struct skb_shared_info *first_shinfo = NULL;
        int nr_frags = shinfo->nr_frags;
        const bool sharedslot = nr_frags &&
                                frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
        int i, err;

        /* Check status of header. */
        err = (*gopp_copy)->status;
        if (unlikely(err)) {
                if (net_ratelimit())
                        netdev_dbg(queue->vif->dev,
                                   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
                                   (*gopp_copy)->status,
                                   pending_idx,
                                   (*gopp_copy)->source.u.ref);
                /* The first frag might still have this slot mapped */
                if (!sharedslot)
                        xenvif_idx_release(queue, pending_idx,
                                           XEN_NETIF_RSP_ERROR);
        }
        (*gopp_copy)++;

check_frags:
        for (i = 0; i < nr_frags; i++, gop_map++) {
                int j, newerr;

                pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

                /* Check error status: if okay then remember grant handle. */
                newerr = gop_map->status;

                if (likely(!newerr)) {
                        xenvif_grant_handle_set(queue,
                                                pending_idx,
                                                gop_map->handle);
                        /* Had a previous error? Invalidate this fragment. */
                        if (unlikely(err)) {
                                xenvif_idx_unmap(queue, pending_idx);
                                /* If the mapping of the first frag was OK, but
                                 * the header's copy failed, and they are
                                 * sharing a slot, send an error
                                 */
                                if (i == 0 && sharedslot)
                                        xenvif_idx_release(queue, pending_idx,
                                                           XEN_NETIF_RSP_ERROR);
                                else
                                        xenvif_idx_release(queue, pending_idx,
                                                           XEN_NETIF_RSP_OKAY);
                        }
                        continue;
                }

                /* Error on this fragment: respond to client with an error. */
                if (net_ratelimit())
                        netdev_dbg(queue->vif->dev,
                                   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
                                   i,
                                   gop_map->status,
                                   pending_idx,
                                   gop_map->ref);

                xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

                /* Not the first error? Preceding frags already invalidated. */
                if (err)
                        continue;

                /* First error: if the header hasn't shared a slot with the
                 * first frag, release it as well.
                 */
                if (!sharedslot)
                        xenvif_idx_release(queue,
                                           XENVIF_TX_CB(skb)->pending_idx,
                                           XEN_NETIF_RSP_OKAY);

                /* Invalidate preceding fragments of this skb. */
                for (j = 0; j < i; j++) {
                        pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
                        xenvif_idx_unmap(queue, pending_idx);
                        xenvif_idx_release(queue, pending_idx,
                                           XEN_NETIF_RSP_OKAY);
                }

                /* And if we found the error while checking the frag_list, unmap
                 * the first skb's frags
                 */
                if (first_shinfo) {
                        for (j = 0; j < first_shinfo->nr_frags; j++) {
                                pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
                                xenvif_idx_unmap(queue, pending_idx);
                                xenvif_idx_release(queue, pending_idx,
                                                   XEN_NETIF_RSP_OKAY);
                        }
                }

                /* Remember the error: invalidate all subsequent fragments. */
                err = newerr;
        }

        if (skb_has_frag_list(skb) && !first_shinfo) {
                first_shinfo = skb_shinfo(skb);
                shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
                nr_frags = shinfo->nr_frags;

                goto check_frags;
        }

        *gopp_map = gop_map;
        return err;
}

static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int nr_frags = shinfo->nr_frags;
        int i;
        u16 prev_pending_idx = INVALID_PENDING_IDX;

        for (i = 0; i < nr_frags; i++) {
                skb_frag_t *frag = shinfo->frags + i;
                struct xen_netif_tx_request *txp;
                struct page *page;
                u16 pending_idx;

                pending_idx = frag_get_pending_idx(frag);

                /* If this is not the first frag, chain it to the previous */
                if (prev_pending_idx == INVALID_PENDING_IDX)
                        skb_shinfo(skb)->destructor_arg =
                                &callback_param(queue, pending_idx);
                else
                        callback_param(queue, prev_pending_idx).ctx =
                                &callback_param(queue, pending_idx);

                callback_param(queue, pending_idx).ctx = NULL;
                prev_pending_idx = pending_idx;

                txp = &queue->pending_tx_info[pending_idx].req;
                page = virt_to_page(idx_to_kaddr(queue, pending_idx));
                __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
                skb->len += txp->size;
                skb->data_len += txp->size;
                skb->truesize += txp->size;

                /* Take an extra reference to offset network stack's put_page */
                get_page(queue->mmap_pages[pending_idx]);
        }
}

static int xenvif_get_extras(struct xenvif_queue *queue,
                             struct xen_netif_extra_info *extras,
                             unsigned int *extra_count,
                             int work_to_do)
{
        struct xen_netif_extra_info extra;
        RING_IDX cons = queue->tx.req_cons;

        do {
                if (unlikely(work_to_do-- <= 0)) {
                        netdev_err(queue->vif->dev, "Missing extra info\n");
                        xenvif_fatal_tx_err(queue->vif);
                        return -EBADR;
                }

                RING_COPY_REQUEST(&queue->tx, cons, &extra);

                queue->tx.req_cons = ++cons;
                (*extra_count)++;

                if (unlikely(!extra.type ||
                             extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
                        netdev_err(queue->vif->dev,
                                   "Invalid extra type: %d\n", extra.type);
                        xenvif_fatal_tx_err(queue->vif);
                        return -EINVAL;
                }

                memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
        } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

        return work_to_do;
}

static int xenvif_set_skb_gso(struct xenvif *vif,
                              struct sk_buff *skb,
                              struct xen_netif_extra_info *gso)
{
        if (!gso->u.gso.size) {
                netdev_err(vif->dev, "GSO size must not be zero.\n");
                xenvif_fatal_tx_err(vif);
                return -EINVAL;
        }

        switch (gso->u.gso.type) {
        case XEN_NETIF_GSO_TYPE_TCPV4:
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                break;
        case XEN_NETIF_GSO_TYPE_TCPV6:
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                break;
        default:
                netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
                xenvif_fatal_tx_err(vif);
                return -EINVAL;
        }

        skb_shinfo(skb)->gso_size = gso->u.gso.size;
        /* gso_segs will be calculated later */

        return 0;
}

static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
{
        bool recalculate_partial_csum = false;

        /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
         * peers can fail to set NETRXF_csum_blank when sending a GSO
         * frame. In this case force the SKB to CHECKSUM_PARTIAL and
         * recalculate the partial checksum.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
                queue->stats.rx_gso_checksum_fixup++;
                skb->ip_summed = CHECKSUM_PARTIAL;
                recalculate_partial_csum = true;
        }

        /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        return skb_checksum_setup(skb, recalculate_partial_csum);
}

static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{
        u64 now = get_jiffies_64();
        u64 next_credit = queue->credit_window_start +
                msecs_to_jiffies(queue->credit_usec / 1000);

        /* Timer could already be pending in rare cases. */
        if (timer_pending(&queue->credit_timeout)) {
                queue->rate_limited = true;
                return true;
        }

        /* Passed the point where we can replenish credit? */
        if (time_after_eq64(now, next_credit)) {
                queue->credit_window_start = now;
                tx_add_credit(queue);
        }

        /* Still too big to send right now? Set a callback. */
        if (size > queue->remaining_credit) {
                mod_timer(&queue->credit_timeout,
                          next_credit);
                queue->credit_window_start = next_credit;
                queue->rate_limited = true;

                return true;
        }

        return false;
}

/* No locking is required in xenvif_mcast_add/del() as they are
 * only ever invoked from NAPI poll. An RCU list is used because
 * xenvif_mcast_match() is called asynchronously, during start_xmit.
 */

static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
{
        struct xenvif_mcast_addr *mcast;

        if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
                if (net_ratelimit())
                        netdev_err(vif->dev,
                                   "Too many multicast addresses\n");
                return -ENOSPC;
        }

        mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
        if (!mcast)
                return -ENOMEM;

        ether_addr_copy(mcast->addr, addr);
        list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
        vif->fe_mcast_count++;

        return 0;
}

static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
{
        struct xenvif_mcast_addr *mcast;

        list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
                if (ether_addr_equal(addr, mcast->addr)) {
                        --vif->fe_mcast_count;
                        list_del_rcu(&mcast->entry);
                        kfree_rcu(mcast, rcu);
                        break;
                }
        }
}

bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
{
        struct xenvif_mcast_addr *mcast;

        rcu_read_lock();
        list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
                if (ether_addr_equal(addr, mcast->addr)) {
                        rcu_read_unlock();
                        return true;
                }
        }
        rcu_read_unlock();

        return false;
}

void xenvif_mcast_addr_list_free(struct xenvif *vif)
{
        /* No need for locking or RCU here. NAPI poll and TX queue
         * are stopped.
         */
        while (!list_empty(&vif->fe_mcast_addr)) {
                struct xenvif_mcast_addr *mcast;

                mcast = list_first_entry(&vif->fe_mcast_addr,
                                         struct xenvif_mcast_addr,
                                         entry);
                --vif->fe_mcast_count;
                list_del(&mcast->entry);
                kfree(mcast);
        }
}
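
/* Build the grant operations for a batch of Tx requests: up to
 * XEN_NETBACK_TX_COPY_LEN bytes of each packet are grant-copied into the
 * skb's linear area, and any remaining slots are grant-mapped as frags.
 */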
static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                                 int budget,
                                 unsigned *copy_ops,
                                 unsigned *map_ops)
{
        struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
        struct sk_buff *skb, *nskb;
        int ret;
        unsigned int frag_overflow;

        while (skb_queue_len(&queue->tx_queue) < budget) {
                struct xen_netif_tx_request txreq;
                struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
                struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
                unsigned int extra_count;
                u16 pending_idx;
                RING_IDX idx;
                int work_to_do;
                unsigned int data_len;
                pending_ring_idx_t index;

                if (queue->tx.sring->req_prod - queue->tx.req_cons >
                    XEN_NETIF_TX_RING_SIZE) {
                        netdev_err(queue->vif->dev,
                                   "Impossible number of requests. "
                                   "req_prod %d, req_cons %d, size %ld\n",
                                   queue->tx.sring->req_prod, queue->tx.req_cons,
                                   XEN_NETIF_TX_RING_SIZE);
                        xenvif_fatal_tx_err(queue->vif);
                        break;
                }

                work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
                if (!work_to_do)
                        break;

                idx = queue->tx.req_cons;
                rmb(); /* Ensure that we see the request before we copy it. */
                RING_COPY_REQUEST(&queue->tx, idx, &txreq);

                /* Credit-based scheduling. */
                if (txreq.size > queue->remaining_credit &&
                    tx_credit_exceeded(queue, txreq.size))
                        break;

                queue->remaining_credit -= txreq.size;

                work_to_do--;
                queue->tx.req_cons = ++idx;

                memset(extras, 0, sizeof(extras));
                extra_count = 0;
                if (txreq.flags & XEN_NETTXF_extra_info) {
                        work_to_do = xenvif_get_extras(queue, extras,
                                                       &extra_count,
                                                       work_to_do);
                        idx = queue->tx.req_cons;
                        if (unlikely(work_to_do < 0))
                                break;
                }

                if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
                        struct xen_netif_extra_info *extra;

                        extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
                        ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);

                        make_tx_response(queue, &txreq, extra_count,
                                         (ret == 0) ?
                                         XEN_NETIF_RSP_OKAY :
                                         XEN_NETIF_RSP_ERROR);
                        push_tx_responses(queue);
                        continue;
                }

                if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
                        struct xen_netif_extra_info *extra;

                        extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
                        xenvif_mcast_del(queue->vif, extra->u.mcast.addr);

                        make_tx_response(queue, &txreq, extra_count,
                                         XEN_NETIF_RSP_OKAY);
                        push_tx_responses(queue);
                        continue;
                }

                ret = xenvif_count_requests(queue, &txreq, extra_count,
                                            txfrags, work_to_do);
                if (unlikely(ret < 0))
                        break;

                idx += ret;

                if (unlikely(txreq.size < ETH_HLEN)) {
                        netdev_dbg(queue->vif->dev,
                                   "Bad packet size: %d\n", txreq.size);
                        xenvif_tx_err(queue, &txreq, extra_count, idx);
                        break;
                }

                /* No crossing a page as the payload mustn't fragment. */
                if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
                        netdev_err(queue->vif->dev,
                                   "txreq.offset: %u, size: %u, end: %lu\n",
                                   txreq.offset, txreq.size,
                                   (unsigned long)(txreq.offset & ~XEN_PAGE_MASK) + txreq.size);
                        xenvif_fatal_tx_err(queue->vif);
                        break;
                }

                index = pending_index(queue->pending_cons);
                pending_idx = queue->pending_ring[index];

                data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
                            ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
                        XEN_NETBACK_TX_COPY_LEN : txreq.size;

                skb = xenvif_alloc_skb(data_len);
                if (unlikely(skb == NULL)) {
                        netdev_dbg(queue->vif->dev,
                                   "Can't allocate a skb in start_xmit.\n");
                        xenvif_tx_err(queue, &txreq, extra_count, idx);
                        break;
                }

                skb_shinfo(skb)->nr_frags = ret;
                if (data_len < txreq.size)
                        skb_shinfo(skb)->nr_frags++;
                /* At this point shinfo->nr_frags is in fact the number of
                 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
                 */
                frag_overflow = 0;
                nskb = NULL;
                if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
                        frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
                        BUG_ON(frag_overflow > MAX_SKB_FRAGS);
                        skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
                        nskb = xenvif_alloc_skb(0);
                        if (unlikely(nskb == NULL)) {
                                kfree_skb(skb);
                                xenvif_tx_err(queue, &txreq, extra_count, idx);
                                if (net_ratelimit())
                                        netdev_err(queue->vif->dev,
                                                   "Can't allocate the frag_list skb.\n");
                                break;
                        }
                }

                if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
                        struct xen_netif_extra_info *gso;
                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

                        if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
                                /* Failure in xenvif_set_skb_gso is fatal. */
                                kfree_skb(skb);
                                kfree_skb(nskb);
                                break;
                        }
                }

                if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
                        struct xen_netif_extra_info *extra;
                        enum pkt_hash_types type = PKT_HASH_TYPE_NONE;

                        extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

                        switch (extra->u.hash.type) {
                        case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
                        case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
                                type = PKT_HASH_TYPE_L3;
                                break;

                        case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
                        case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
                                type = PKT_HASH_TYPE_L4;
                                break;

                        default:
                                break;
                        }

                        if (type != PKT_HASH_TYPE_NONE)
                                skb_set_hash(skb,
                                             *(u32 *)extra->u.hash.value,
                                             type);
                }

                XENVIF_TX_CB(skb)->pending_idx = pending_idx;

                __skb_put(skb, data_len);
                queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
                queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
                queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;

                queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
                        virt_to_gfn(skb->data);
                queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
                queue->tx_copy_ops[*copy_ops].dest.offset =
                        offset_in_page(skb->data) & ~XEN_PAGE_MASK;

                queue->tx_copy_ops[*copy_ops].len = data_len;
                queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;

                (*copy_ops)++;

                if (data_len < txreq.size) {
                        frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
                                             pending_idx);
                        xenvif_tx_create_map_op(queue, pending_idx, &txreq,
                                                extra_count, gop);
                        gop++;
                } else {
                        frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
                                             INVALID_PENDING_IDX);
                        memcpy(&queue->pending_tx_info[pending_idx].req,
                               &txreq, sizeof(txreq));
                        queue->pending_tx_info[pending_idx].extra_count =
                                extra_count;
                }

                queue->pending_cons++;

                gop = xenvif_get_requests(queue, skb, txfrags, gop,
                                          frag_overflow, nskb);

                __skb_queue_tail(&queue->tx_queue, skb);

                queue->tx.req_cons = idx;

                if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
                    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
                        break;
        }

        (*map_ops) = gop - queue->tx_map_ops;
        return;
}

/* Consolidate skb with a frag_list into a brand new one with local pages on
 * frags. Returns 0, or -ENOMEM if it can't allocate new pages.
 */
static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
{
        unsigned int offset = skb_headlen(skb);
        skb_frag_t frags[MAX_SKB_FRAGS];
        int i, f;
        struct ubuf_info *uarg;
        struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

        queue->stats.tx_zerocopy_sent += 2;
        queue->stats.tx_frag_overflow++;

        xenvif_fill_frags(queue, nskb);
        /* Subtract frags size, we will correct it later */
        skb->truesize -= skb->data_len;
        skb->len += nskb->len;
        skb->data_len += nskb->len;

        /* create a brand new frags array and coalesce there */
        for (i = 0; offset < skb->len; i++) {
                struct page *page;
                unsigned int len;

                BUG_ON(i >= MAX_SKB_FRAGS);
                page = alloc_page(GFP_ATOMIC);
                if (!page) {
                        int j;
                        skb->truesize += skb->data_len;
                        for (j = 0; j < i; j++)
                                put_page(skb_frag_page(&frags[j]));
                        return -ENOMEM;
                }

                if (offset + PAGE_SIZE < skb->len)
                        len = PAGE_SIZE;
                else
                        len = skb->len - offset;
                if (skb_copy_bits(skb, offset, page_address(page), len))
                        BUG();

                offset += len;
                __skb_frag_set_page(&frags[i], page);
                frags[i].page_offset = 0;
                skb_frag_size_set(&frags[i], len);
        }

        /* Release all the original (foreign) frags. */
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                skb_frag_unref(skb, f);
        uarg = skb_shinfo(skb)->destructor_arg;
        /* increase inflight counter to offset decrement in callback */
        atomic_inc(&queue->inflight_packets);
        uarg->callback(uarg, true);
        skb_shinfo(skb)->destructor_arg = NULL;

        /* Fill the skb with the new (local) frags. */
        memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
        skb_shinfo(skb)->nr_frags = i;
        skb->truesize += i * PAGE_SIZE;

        return 0;
}
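
/* Second half of the Tx path: check the result of the grant copy/map
 * batch, fill in the frags, fix up checksum and GSO metadata, and hand
 * the completed skbs to the network stack.
 */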
static int xenvif_tx_submit(struct xenvif_queue *queue)
{
        struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
        struct gnttab_copy *gop_copy = queue->tx_copy_ops;
        struct sk_buff *skb;
        int work_done = 0;

        while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
                struct xen_netif_tx_request *txp;
                u16 pending_idx;
                unsigned data_len;

                pending_idx = XENVIF_TX_CB(skb)->pending_idx;
                txp = &queue->pending_tx_info[pending_idx].req;

                /* Check the remap error code. */
                if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
                        /* If there was an error, xenvif_tx_check_gop is
                         * expected to release all the frags which were mapped,
                         * so kfree_skb shouldn't do it again
                         */
                        skb_shinfo(skb)->nr_frags = 0;
                        if (skb_has_frag_list(skb)) {
                                struct sk_buff *nskb =
                                                skb_shinfo(skb)->frag_list;
                                skb_shinfo(nskb)->nr_frags = 0;
                        }
                        kfree_skb(skb);
                        continue;
                }

                data_len = skb->len;
                callback_param(queue, pending_idx).ctx = NULL;
                if (data_len < txp->size) {
                        /* Append the packet payload as a fragment. */
                        txp->offset += data_len;
                        txp->size -= data_len;
                } else {
                        /* Schedule a response immediately. */
                        xenvif_idx_release(queue, pending_idx,
                                           XEN_NETIF_RSP_OKAY);
                }

                if (txp->flags & XEN_NETTXF_csum_blank)
                        skb->ip_summed = CHECKSUM_PARTIAL;
                else if (txp->flags & XEN_NETTXF_data_validated)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                xenvif_fill_frags(queue, skb);

                if (unlikely(skb_has_frag_list(skb))) {
                        struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
                        xenvif_skb_zerocopy_prepare(queue, nskb);
                        if (xenvif_handle_frag_list(queue, skb)) {
                                if (net_ratelimit())
                                        netdev_err(queue->vif->dev,
                                                   "Not enough memory to consolidate frag_list!\n");
                                xenvif_skb_zerocopy_prepare(queue, skb);
                                kfree_skb(skb);
                                continue;
                        }
                        /* Copied all the bits from the frag list -- free it. */
                        skb_frag_list_init(skb);
                        kfree_skb(nskb);
                }

                skb->dev = queue->vif->dev;
                skb->protocol = eth_type_trans(skb, skb->dev);
                skb_reset_network_header(skb);

                if (checksum_setup(queue, skb)) {
                        netdev_dbg(queue->vif->dev,
                                   "Can't setup checksum in net_tx_action\n");
                        /* We have to set this flag to trigger the callback */
                        if (skb_shinfo(skb)->destructor_arg)
                                xenvif_skb_zerocopy_prepare(queue, skb);
                        kfree_skb(skb);
                        continue;
                }

                skb_probe_transport_header(skb);

                /* If the packet is GSO then we will have just set up the
                 * transport header offset in checksum_setup so it's now
                 * straightforward to calculate gso_segs.
                 */
                if (skb_is_gso(skb)) {
                        int mss, hdrlen;

                        /* GSO implies having the L4 header. */
                        WARN_ON_ONCE(!skb_transport_header_was_set(skb));
                        if (unlikely(!skb_transport_header_was_set(skb))) {
                                kfree_skb(skb);
                                continue;
                        }

                        mss = skb_shinfo(skb)->gso_size;
                        hdrlen = skb_transport_header(skb) -
                                 skb_mac_header(skb) +
                                 tcp_hdrlen(skb);

                        skb_shinfo(skb)->gso_segs =
                                DIV_ROUND_UP(skb->len - hdrlen, mss);
                }

                queue->stats.rx_bytes += skb->len;
                queue->stats.rx_packets++;

                work_done++;

                /* Set this flag right before netif_receive_skb, otherwise
                 * someone might think this packet already left netback, and
                 * do a skb_copy_ubufs while we are still in control of the
                 * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
                 */
                if (skb_shinfo(skb)->destructor_arg) {
                        xenvif_skb_zerocopy_prepare(queue, skb);
                        queue->stats.tx_zerocopy_sent++;
                }

                netif_receive_skb(skb);
        }

        return work_done;
}
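
/* Completion callback for zerocopy skbs: pushes the pending indices onto
 * the dealloc ring for the dealloc kthread to unmap. callback_lock
 * serialises concurrent invocations of this callback.
 */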
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
        unsigned long flags;
        pending_ring_idx_t index;
        struct xenvif_queue *queue = ubuf_to_queue(ubuf);

        /* This is the only place where we grab this lock, to protect callbacks
         * from each other.
         */
        spin_lock_irqsave(&queue->callback_lock, flags);
        do {
                u16 pending_idx = ubuf->desc;
                ubuf = (struct ubuf_info *) ubuf->ctx;
                BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
                        MAX_PENDING_REQS);
                index = pending_index(queue->dealloc_prod);
                queue->dealloc_ring[index] = pending_idx;
                /* Sync with xenvif_tx_dealloc_action:
                 * insert idx then incr producer.
                 */
                smp_wmb();
                queue->dealloc_prod++;
        } while (ubuf);
        spin_unlock_irqrestore(&queue->callback_lock, flags);

        if (likely(zerocopy_success))
                queue->stats.tx_zerocopy_success++;
        else
                queue->stats.tx_zerocopy_fail++;
        xenvif_skb_zerocopy_complete(queue);
}

static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
        struct gnttab_unmap_grant_ref *gop;
        pending_ring_idx_t dc, dp;
        u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
        unsigned int i = 0;

        dc = queue->dealloc_cons;
        gop = queue->tx_unmap_ops;

        /* Free up any grants we have finished using */
        do {
                dp = queue->dealloc_prod;

                /* Ensure we see all indices enqueued by all
                 * xenvif_zerocopy_callback().
                 */
                smp_rmb();

                while (dc != dp) {
                        BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
                        pending_idx =
                                queue->dealloc_ring[pending_index(dc++)];

                        pending_idx_release[gop - queue->tx_unmap_ops] =
                                pending_idx;
                        queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
                                queue->mmap_pages[pending_idx];
                        gnttab_set_unmap_op(gop,
                                            idx_to_kaddr(queue, pending_idx),
                                            GNTMAP_host_map,
                                            queue->grant_tx_handle[pending_idx]);
                        xenvif_grant_handle_reset(queue, pending_idx);
                        ++gop;
                }

        } while (dp != queue->dealloc_prod);

        queue->dealloc_cons = dc;

        if (gop - queue->tx_unmap_ops > 0) {
                int ret;
                ret = gnttab_unmap_refs(queue->tx_unmap_ops,
                                        NULL,
                                        queue->pages_to_unmap,
                                        gop - queue->tx_unmap_ops);
                if (ret) {
                        netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
                                   gop - queue->tx_unmap_ops, ret);
                        for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
                                if (gop[i].status != GNTST_okay)
                                        netdev_err(queue->vif->dev,
                                                   " host_addr: 0x%llx handle: 0x%x status: %d\n",
                                                   gop[i].host_addr,
                                                   gop[i].handle,
                                                   gop[i].status);
                        }
                        BUG();
                }
        }

        for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
                xenvif_idx_release(queue, pending_idx_release[i],
                                   XEN_NETIF_RSP_OKAY);
}

/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
        unsigned nr_mops, nr_cops = 0;
        int work_done, ret;

        if (unlikely(!tx_work_todo(queue)))
                return 0;

        xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);

        if (nr_cops == 0)
                return 0;

        gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
        if (nr_mops != 0) {
                ret = gnttab_map_refs(queue->tx_map_ops,
                                      NULL,
                                      queue->pages_to_map,
                                      nr_mops);
                BUG_ON(ret);
        }

        work_done = xenvif_tx_submit(queue);

        return work_done;
}

static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
                               u8 status)
{
        struct pending_tx_info *pending_tx_info;
        pending_ring_idx_t index;
        unsigned long flags;

        pending_tx_info = &queue->pending_tx_info[pending_idx];

        spin_lock_irqsave(&queue->response_lock, flags);

        make_tx_response(queue, &pending_tx_info->req,
                         pending_tx_info->extra_count, status);

        /* Release the pending index before pushing the Tx response so
         * it's available before a new Tx request is pushed by the
         * frontend.
         */
        index = pending_index(queue->pending_prod++);
        queue->pending_ring[index] = pending_idx;

        push_tx_responses(queue);

        spin_unlock_irqrestore(&queue->response_lock, flags);
}

static void make_tx_response(struct xenvif_queue *queue,
                             struct xen_netif_tx_request *txp,
                             unsigned int extra_count,
                             s8 st)
{
        RING_IDX i = queue->tx.rsp_prod_pvt;
        struct xen_netif_tx_response *resp;

        resp = RING_GET_RESPONSE(&queue->tx, i);
        resp->id = txp->id;
        resp->status = st;

        while (extra_count-- != 0)
                RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;

        queue->tx.rsp_prod_pvt = ++i;
}

static void push_tx_responses(struct xenvif_queue *queue)
{
        int notify;

        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
        if (notify)
                notify_remote_via_irq(queue->tx_irq);
}

void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
        int ret;
        struct gnttab_unmap_grant_ref tx_unmap_op;

        gnttab_set_unmap_op(&tx_unmap_op,
                            idx_to_kaddr(queue, pending_idx),
                            GNTMAP_host_map,
                            queue->grant_tx_handle[pending_idx]);
        xenvif_grant_handle_reset(queue, pending_idx);

        ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
                                &queue->mmap_pages[pending_idx], 1);
        if (ret) {
                netdev_err(queue->vif->dev,
                           "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
                           ret,
                           pending_idx,
                           tx_unmap_op.host_addr,
                           tx_unmap_op.handle,
                           tx_unmap_op.status);
                BUG();
        }
}

static inline int tx_work_todo(struct xenvif_queue *queue)
{
        if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
                return 1;

        return 0;
}

static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
        return queue->dealloc_cons != queue->dealloc_prod;
}

void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
{
        if (queue->tx.sring)
                xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
                                        queue->tx.sring);
        if (queue->rx.sring)
                xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
                                        queue->rx.sring);
}

int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
                                   grant_ref_t tx_ring_ref,
                                   grant_ref_t rx_ring_ref)
{
        void *addr;
        struct xen_netif_tx_sring *txs;
        struct xen_netif_rx_sring *rxs;

        int err = -ENOMEM;

        err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
                                     &tx_ring_ref, 1, &addr);
        if (err)
                goto err;

        txs = (struct xen_netif_tx_sring *)addr;
        BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);

        err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
                                     &rx_ring_ref, 1, &addr);
        if (err)
                goto err;

        rxs = (struct xen_netif_rx_sring *)addr;
        BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);

        return 0;

err:
        xenvif_unmap_frontend_data_rings(queue);
        return err;
}

static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
{
        /* Dealloc thread must remain running until all inflight
         * packets complete.
         */
        return kthread_should_stop() &&
                !atomic_read(&queue->inflight_packets);
}

int xenvif_dealloc_kthread(void *data)
{
        struct xenvif_queue *queue = data;

        for (;;) {
                wait_event_interruptible(queue->dealloc_wq,
                                         tx_dealloc_work_todo(queue) ||
                                         xenvif_dealloc_kthread_should_stop(queue));
                if (xenvif_dealloc_kthread_should_stop(queue))
                        break;

                xenvif_tx_dealloc_action(queue);
                cond_resched();
        }

        /* Unmap anything remaining */
        if (tx_dealloc_work_todo(queue))
                xenvif_tx_dealloc_action(queue);

        return 0;
}
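
/* Control ring: the frontend's hash configuration requests are consumed
 * and answered directly from the ctrl event handler below.
 */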
static void make_ctrl_response(struct xenvif *vif,
                               const struct xen_netif_ctrl_request *req,
                               u32 status, u32 data)
{
        RING_IDX idx = vif->ctrl.rsp_prod_pvt;
        struct xen_netif_ctrl_response rsp = {
                .id = req->id,
                .type = req->type,
                .status = status,
                .data = data,
        };

        *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
        vif->ctrl.rsp_prod_pvt = ++idx;
}

static void push_ctrl_response(struct xenvif *vif)
{
        int notify;

        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
        if (notify)
                notify_remote_via_irq(vif->ctrl_irq);
}

static void process_ctrl_request(struct xenvif *vif,
                                 const struct xen_netif_ctrl_request *req)
{
        u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
        u32 data = 0;

        switch (req->type) {
        case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
                status = xenvif_set_hash_alg(vif, req->data[0]);
                break;

        case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
                status = xenvif_get_hash_flags(vif, &data);
                break;

        case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
                status = xenvif_set_hash_flags(vif, req->data[0]);
                break;

        case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
                status = xenvif_set_hash_key(vif, req->data[0],
                                             req->data[1]);
                break;

        case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
                status = XEN_NETIF_CTRL_STATUS_SUCCESS;
                data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
                break;

        case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
                status = xenvif_set_hash_mapping_size(vif,
                                                      req->data[0]);
                break;

        case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
                status = xenvif_set_hash_mapping(vif, req->data[0],
                                                 req->data[1],
                                                 req->data[2]);
                break;

        default:
                break;
        }

        make_ctrl_response(vif, req, status, data);
        push_ctrl_response(vif);
}

static void xenvif_ctrl_action(struct xenvif *vif)
{
        for (;;) {
                RING_IDX req_prod, req_cons;

                req_prod = vif->ctrl.sring->req_prod;
                req_cons = vif->ctrl.req_cons;

                /* Make sure we can see requests before we process them. */
                rmb();

                if (req_cons == req_prod)
                        break;

                while (req_cons != req_prod) {
                        struct xen_netif_ctrl_request req;

                        RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
                        req_cons++;

                        process_ctrl_request(vif, &req);
                }

                vif->ctrl.req_cons = req_cons;
                vif->ctrl.sring->req_event = req_cons + 1;
        }
}

static bool xenvif_ctrl_work_todo(struct xenvif *vif)
{
        if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
                return true;

        return false;
}

irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
{
        struct xenvif *vif = data;

        while (xenvif_ctrl_work_todo(vif))
                xenvif_ctrl_action(vif);

        return IRQ_HANDLED;
}

static int __init netback_init(void)
{
        int rc = 0;

        if (!xen_domain())
                return -ENODEV;

        /* Allow as many queues as there are CPUs, but at most 8, if the
         * user has not specified a value.
         */
        if (xenvif_max_queues == 0)
                xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
                                          num_online_cpus());

        if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
                pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
                        fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
                fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
        }

        rc = xenvif_xenbus_init();
        if (rc)
                goto failed_init;

#ifdef CONFIG_DEBUG_FS
        xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
        if (IS_ERR_OR_NULL(xen_netback_dbg_root))
                pr_warn("Init of debugfs returned %ld!\n",
                        PTR_ERR(xen_netback_dbg_root));
#endif /* CONFIG_DEBUG_FS */

        return 0;

failed_init:
        return rc;
}

module_init(netback_init);

static void __exit netback_fini(void)
{
#ifdef CONFIG_DEBUG_FS
        debugfs_remove_recursive(xen_netback_dbg_root);
#endif /* CONFIG_DEBUG_FS */
        xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");