/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>

/* Provide an option to disable split event channels at load time as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = 1;
module_param(separate_tx_rx_irq, bool, 0644);

/*
 * This is the maximum number of slots a skb can have. If a guest sends a skb
 * which exceeds this limit it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);

/*
 * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
 * the maximum number of slots a valid packet can use. This value is
 * currently defined to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be
 * supported by all backends.
 */
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN

typedef unsigned int pending_ring_idx_t;
#define INVALID_PENDING_RING_IDX (~0U)

struct pending_tx_info {
	struct xen_netif_tx_request req; /* coalesced tx request */
	struct xenvif *vif;
	pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
				  * if it is the head of one or more
				  * coalesced tx reqs
				  */
};

struct netbk_rx_meta {
	int id;
	int size;
	int gso_size;
};

#define MAX_PENDING_REQS 256

/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF

#define MAX_BUFFER_OFFSET PAGE_SIZE

/* extra field used in struct page */
union page_ext {
	struct {
#if BITS_PER_LONG < 64
#define IDX_WIDTH   8
#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
		unsigned int group:GROUP_WIDTH;
		unsigned int idx:IDX_WIDTH;
#else
		unsigned int group, idx;
#endif
	} e;
	void *mapping;
};

struct xen_netbk {
	wait_queue_head_t wq;
	struct task_struct *task;

	struct sk_buff_head rx_queue;
	struct sk_buff_head tx_queue;

	struct timer_list net_timer;

	struct page *mmap_pages[MAX_PENDING_REQS];

	pending_ring_idx_t pending_prod;
	pending_ring_idx_t pending_cons;
	struct list_head net_schedule_list;

	/* Protect the net_schedule_list in netif. */
	spinlock_t net_schedule_list_lock;

	atomic_t netfront_count;

	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
	/* Coalescing tx requests before copying makes the number of grant
	 * copy ops greater than or equal to the number of slots required.
	 * In the worst case a tx request consumes 2 gnttab_copy operations.
	 */
	struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];

	u16 pending_ring[MAX_PENDING_REQS];

	/*
	 * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
	 * head/fragment page uses 2 copy operations because it
	 * straddles two buffers in the frontend.
	 */
	struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
	struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
};

static struct xen_netbk *xen_netbk;
static int xen_netbk_group_nr;

/*
 * If head != INVALID_PENDING_RING_IDX, it means this tx request is the head
 * of one or more merged tx requests; otherwise it is the continuation of the
 * previous tx request.
 */
static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
{
	return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
}
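
/*
 * Example of the head/continuation encoding (illustrative, assuming three
 * tx requests A, B and C that were coalesced into one page):
 *
 *   pending_tx_info[A].head = <pending ring index of A>  <- head of the chain
 *   pending_tx_info[B].head = INVALID_PENDING_RING_IDX   <- continuation
 *   pending_tx_info[C].head = INVALID_PENDING_RING_IDX   <- continuation
 *
 * A consumer walking the pending ring (e.g. xen_netbk_tx_check_gop() below)
 * keeps extending the chain until pending_tx_is_head() returns true for the
 * next entry, i.e. until it reaches the head of the following packet.
 */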

void xen_netbk_add_xenvif(struct xenvif *vif)
{
	int i;
	int min_netfront_count;
	int min_group = 0;
	struct xen_netbk *netbk;

	min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
	for (i = 0; i < xen_netbk_group_nr; i++) {
		int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
		if (netfront_count < min_netfront_count) {
			min_group = i;
			min_netfront_count = netfront_count;
		}
	}

	netbk = &xen_netbk[min_group];

	vif->netbk = netbk;
	atomic_inc(&netbk->netfront_count);
}

void xen_netbk_remove_xenvif(struct xenvif *vif)
{
	struct xen_netbk *netbk = vif->netbk;
	vif->netbk = NULL;
	atomic_dec(&netbk->netfront_count);
}

static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
				  u8 status);
static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8 st);
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
						      u16 id,
						      s8 st,
						      u16 offset,
						      u16 size,
						      u16 flags);

static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
				       u16 idx)
{
	return page_to_pfn(netbk->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
}

/* extra field used in struct page */
static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
				unsigned int idx)
{
	unsigned int group = netbk - xen_netbk;
	union page_ext ext = { .e = { .group = group + 1, .idx = idx } };

	BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
	pg->mapping = ext.mapping;
}

static int get_page_ext(struct page *pg,
			unsigned int *pgroup, unsigned int *pidx)
{
	union page_ext ext = { .mapping = pg->mapping };
	struct xen_netbk *netbk;
	unsigned int group, idx;

	group = ext.e.group - 1;

	if (group < 0 || group >= xen_netbk_group_nr)
		return 0;

	netbk = &xen_netbk[group];

	idx = ext.e.idx;

	if ((idx < 0) || (idx >= MAX_PENDING_REQS))
		return 0;

	if (netbk->mmap_pages[idx] != pg)
		return 0;

	*pgroup = group;
	*pidx = idx;

	return 1;
}
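
/*
 * Illustrative example of the page_ext encoding above: set_page_ext() stores
 * group + 1 rather than group, so a page whose ->mapping is still zero/NULL
 * can never decode to a valid group. E.g. for group 2, idx 7:
 *
 *   set_page_ext(pg, &xen_netbk[2], 7);  => ext.e.group == 3, ext.e.idx == 7
 *   get_page_ext(pg, &g, &i);            => g == 2, i == 7, returns 1
 *
 * get_page_ext() additionally cross-checks netbk->mmap_pages[idx] == pg, so
 * a stale encoding left behind on a recycled page is rejected.
 */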

/*
 * This is the amount of each packet that we copy rather than map, so that
 * the guest can't fiddle with the contents of the headers while we do
 * packet processing on them (netfilter, routing, etc).
 */
#define PKT_PROT_LEN    (ETH_HLEN + \
			 VLAN_HLEN + \
			 sizeof(struct iphdr) + MAX_IPOPTLEN + \
			 sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
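
/*
 * Worked example (assuming the usual header sizes): ETH_HLEN is 14 and
 * VLAN_HLEN is 4; sizeof(struct iphdr) + MAX_IPOPTLEN is 20 + 40 = 60; and
 * sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE is 20 + 40 = 60. PKT_PROT_LEN
 * therefore comes to 14 + 4 + 60 + 60 = 138 bytes: enough to cover any legal
 * combination of Ethernet, IPv4 and TCP headers including options.
 */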

static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	frag->page_offset = pending_idx;
}

static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}

static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
{
	return MAX_PENDING_REQS -
		netbk->pending_prod + netbk->pending_cons;
}
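
/*
 * pending_index() relies on MAX_PENDING_REQS (256) being a power of two, so
 * the mask i & (MAX_PENDING_REQS-1) is equivalent to i % MAX_PENDING_REQS
 * even after the free-running counters wrap. Example occupancy calculation:
 * with pending_prod == 300 and pending_cons == 60, nr_pending_reqs() returns
 * 256 - 300 + 60 = 16 requests currently in flight.
 */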

static void xen_netbk_kick_thread(struct xen_netbk *netbk)
{
	wake_up(&netbk->wq);
}

static int max_required_rx_slots(struct xenvif *vif)
{
	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);

	/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
	if (vif->can_sg || vif->gso || vif->gso_prefix)
		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */

	return max;
}

int xen_netbk_rx_ring_full(struct xenvif *vif)
{
	RING_IDX peek   = vif->rx_req_cons_peek;
	RING_IDX needed = max_required_rx_slots(vif);

	return ((vif->rx.sring->req_prod - peek) < needed) ||
	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
}

int xen_netbk_must_stop_queue(struct xenvif *vif)
{
	if (!xen_netbk_rx_ring_full(vif))
		return 0;

	vif->rx.sring->req_event = vif->rx_req_cons_peek +
		max_required_rx_slots(vif);
	mb(); /* request notification /then/ check the queue */

	return xen_netbk_rx_ring_full(vif);
}
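
/*
 * The pattern above is the standard lock-free "stop and re-check" dance: set
 * req_event so the frontend will notify us once enough requests have been
 * posted, issue a memory barrier, then test the ring again. Without the
 * second xen_netbk_rx_ring_full() check, requests posted between the first
 * check and the req_event update would neither be noticed here nor trigger
 * a notification, and the queue could stall indefinitely.
 */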

/*
 * Returns true if we should start a new receive buffer instead of
 * adding 'size' bytes to a buffer which currently contains 'offset'
 * bytes.
 */
static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
	/* simple case: we have completely filled the current buffer. */
	if (offset == MAX_BUFFER_OFFSET)
		return true;

	/*
	 * complex case: start a fresh buffer if the current frag
	 * would overflow the current buffer but only if:
	 *     (i) this frag would fit completely in the next buffer
	 * and (ii) there is already some data in the current buffer
	 * and (iii) this is not the head buffer.
	 *
	 * Where:
	 * - (i) stops us splitting a frag into two copies
	 *   unless the frag is too large for a single buffer.
	 * - (ii) stops us from leaving a buffer pointlessly empty.
	 * - (iii) stops us leaving the first buffer
	 *   empty. Strictly speaking this is already covered
	 *   by (ii) but is explicitly checked because
	 *   netfront relies on the first buffer being
	 *   non-empty and can crash otherwise.
	 *
	 * This means we will effectively linearise small
	 * frags but do not needlessly split large buffers
	 * into multiple copies; large frags tend to get their
	 * own buffers, as before.
	 */
	if ((offset + size > MAX_BUFFER_OFFSET) &&
	    (size <= MAX_BUFFER_OFFSET) && offset && !head)
		return true;

	return false;
}
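
/*
 * Concrete examples with MAX_BUFFER_OFFSET == 4096 (illustrative):
 *
 *   start_new_rx_buffer(4096, ...,  ...) -> true   (buffer already full)
 *   start_new_rx_buffer(3000, 2000, 0)   -> true   (2000-byte frag fits
 *                                                   whole in a fresh buffer)
 *   start_new_rx_buffer(3000, 5000, 0)   -> false  (frag > 4096 must be
 *                                                   split anyway, keep packing)
 *   start_new_rx_buffer(0,    2000, 1)   -> false  (head buffer must receive
 *                                                   some data)
 */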

/*
 * Figure out how many ring slots we're going to need to send @skb to
 * the guest. This function is essentially a dry run of
 * netbk_gop_frag_copy.
 */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
	unsigned int count;
	int i, copy_off;

	count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);

	copy_off = skb_headlen(skb) % PAGE_SIZE;

	if (skb_shinfo(skb)->gso_size)
		count++;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
		unsigned long bytes;

		offset &= ~PAGE_MASK;

		while (size > 0) {
			BUG_ON(offset >= PAGE_SIZE);
			BUG_ON(copy_off > MAX_BUFFER_OFFSET);

			bytes = PAGE_SIZE - offset;

			if (bytes > size)
				bytes = size;

			if (start_new_rx_buffer(copy_off, bytes, 0)) {
				count++;
				copy_off = 0;
			}

			if (copy_off + bytes > MAX_BUFFER_OFFSET)
				bytes = MAX_BUFFER_OFFSET - copy_off;

			copy_off += bytes;

			offset += bytes;
			size -= bytes;

			if (offset == PAGE_SIZE)
				offset = 0;
		}
	}
	return count;
}
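
/*
 * Worked example (assuming 4 KiB pages): a GSO skb with a 200-byte linear
 * area and two page-aligned frags of 1000 and 6000 bytes. The linear area
 * needs DIV_ROUND_UP(200, 4096) = 1 slot and leaves copy_off == 200, and
 * gso_size != 0 adds 1 slot for the extra_info descriptor. The 1000-byte
 * frag packs into the current buffer (copy_off becomes 1200). The 6000-byte
 * frag is walked in page-sized pieces: the first 4096-byte piece opens a new
 * buffer (count == 3) and fills it exactly, and the remaining 1904 bytes
 * open a fourth buffer, giving a final count of 4 slots.
 */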

struct netrx_pending_operations {
	unsigned copy_prod, copy_cons;
	unsigned meta_prod, meta_cons;
	struct gnttab_copy *copy;
	struct netbk_rx_meta *meta;
	int copy_off;
	grant_ref_t copy_gref;
};

static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
						struct netrx_pending_operations *npo)
{
	struct netbk_rx_meta *meta;
	struct xen_netif_rx_request *req;

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req->id;

	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	return meta;
}

/*
 * Set up the grant operations for this fragment. If it's a flipping
 * interface, we also set up the unmap request from here.
 */
static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
				struct netrx_pending_operations *npo,
				struct page *page, unsigned long size,
				unsigned long offset, int *head)
{
	struct gnttab_copy *copy_gop;
	struct netbk_rx_meta *meta;
	/*
	 * These variables are used iff get_page_ext returns true,
	 * in which case they are guaranteed to be initialized.
	 */
	unsigned int uninitialized_var(group), uninitialized_var(idx);
	int foreign = get_page_ext(page, &group, &idx);
	unsigned long bytes;

	/* Data must not cross a page boundary. */
	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));

	meta = npo->meta + npo->meta_prod - 1;

	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (size > 0) {
		BUG_ON(offset >= PAGE_SIZE);
		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

		bytes = PAGE_SIZE - offset;

		if (bytes > size)
			bytes = size;

		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
			/*
			 * Netfront requires there to be some data in the head
			 * buffer.
			 */
			BUG_ON(*head);

			meta = get_next_rx_buffer(vif, npo);
		}

		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - npo->copy_off;

		copy_gop = npo->copy + npo->copy_prod++;
		copy_gop->flags = GNTCOPY_dest_gref;
		if (foreign) {
			struct xen_netbk *netbk = &xen_netbk[group];
			struct pending_tx_info *src_pend;

			src_pend = &netbk->pending_tx_info[idx];

			copy_gop->source.domid = src_pend->vif->domid;
			copy_gop->source.u.ref = src_pend->req.gref;
			copy_gop->flags |= GNTCOPY_source_gref;
		} else {
			void *vaddr = page_address(page);
			copy_gop->source.domid = DOMID_SELF;
			copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
		}
		copy_gop->source.offset = offset;
		copy_gop->dest.domid = vif->domid;

		copy_gop->dest.offset = npo->copy_off;
		copy_gop->dest.u.ref = npo->copy_gref;
		copy_gop->len = bytes;

		npo->copy_off += bytes;
		meta->size += bytes;

		offset += bytes;
		size -= bytes;

		/* Next frame */
		if (offset == PAGE_SIZE && size) {
			BUG_ON(!PageCompound(page));
			page++;
			offset = 0;
		}

		/* Leave a gap for the GSO descriptor. */
		if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
			vif->rx.req_cons++;

		*head = 0; /* There must be something in this buffer now. */

	}
}

/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int netbk_gop_skb(struct sk_buff *skb,
			 struct netrx_pending_operations *npo)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct netbk_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;

	old_meta_prod = npo->meta_prod;

	/* Set up a GSO prefix descriptor, if necessary */
	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if (!vif->gso_prefix)
		meta->gso_size = skb_shinfo(skb)->gso_size;
	else
		meta->gso_size = 0;

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		netbk_gop_frag_copy(vif, skb, npo,
				    virt_to_page(data), len, offset, &head);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		netbk_gop_frag_copy(vif, skb, npo,
				    skb_frag_page(&skb_shinfo(skb)->frags[i]),
				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
				    skb_shinfo(skb)->frags[i].page_offset,
				    &head);
	}

	return npo->meta_prod - old_meta_prod;
}
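
/*
 * Accounting example (illustrative): for a GSO packet on a frontend that
 * negotiated in-band GSO (gso_prefix == 0), netbk_gop_skb() might return 3
 * meta slots; netbk_gop_frag_copy() has already advanced rx.req_cons one
 * extra time for the GSO extra_info descriptor, so the packet consumes
 * 3 + 1 = 4 ring slots in total. A non-GSO packet using 2 meta slots
 * consumes exactly 2 ring slots.
 */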

/*
 * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done. Check that
 * they didn't give any errors and advance over them.
 */
static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
			   struct netrx_pending_operations *npo)
{
	struct gnttab_copy *copy_op;
	int status = XEN_NETIF_RSP_OKAY;
	int i;

	for (i = 0; i < nr_meta_slots; i++) {
		copy_op = npo->copy + npo->copy_cons++;
		if (copy_op->status != GNTST_okay) {
			netdev_dbg(vif->dev,
				   "Bad status %d from copy to DOM%d.\n",
				   copy_op->status, vif->domid);
			status = XEN_NETIF_RSP_ERROR;
		}
	}

	return status;
}

static void netbk_add_frag_responses(struct xenvif *vif, int status,
				     struct netbk_rx_meta *meta,
				     int nr_meta_slots)
{
	int i;
	unsigned long offset;

	/* No fragments used */
	if (nr_meta_slots <= 1)
		return;

	nr_meta_slots--;

	for (i = 0; i < nr_meta_slots; i++) {
		int flags;
		if (i == nr_meta_slots - 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		offset = 0;
		make_rx_response(vif, meta[i].id, status, offset,
				 meta[i].size, flags);
	}
}

struct skb_cb_overlay {
	int meta_slots_used;
};

static void xen_netbk_rx_action(struct xen_netbk *netbk)
{
	struct xenvif *vif = NULL, *tmp;
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	int nr_frags;
	int count;
	unsigned long offset;
	struct skb_cb_overlay *sco;

	struct netrx_pending_operations npo = {
		.copy = netbk->grant_copy_op,
		.meta = netbk->meta,
	};

	skb_queue_head_init(&rxq);

	count = 0;

	while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
		vif = netdev_priv(skb->dev);
		nr_frags = skb_shinfo(skb)->nr_frags;

		sco = (struct skb_cb_overlay *)skb->cb;
		sco->meta_slots_used = netbk_gop_skb(skb, &npo);

		count += nr_frags + 1;

		__skb_queue_tail(&rxq, skb);

		/* Filled the batch queue? */
		/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
			break;
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));

	if (!npo.copy_prod)
		return;

	BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
	gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {
		sco = (struct skb_cb_overlay *)skb->cb;

		vif = netdev_priv(skb->dev);

		if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
			resp = RING_GET_RESPONSE(&vif->rx,
						 vif->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = netbk->meta[npo.meta_cons].gso_size;
			resp->id = netbk->meta[npo.meta_cons].id;
			resp->status = sco->meta_slots_used;

			npo.meta_cons++;
			sco->meta_slots_used--;
		}


		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;

		status = netbk_check_gop(vif, sco->meta_slots_used, &npo);

		if (sco->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
					status, offset,
					netbk->meta[npo.meta_cons].size,
					flags);

		if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&vif->rx,
						  vif->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		netbk_add_frag_responses(vif, status,
					 netbk->meta + npo.meta_cons + 1,
					 sco->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);

		xenvif_notify_tx_completion(vif);

		if (ret && list_empty(&vif->notify_list))
			list_add_tail(&vif->notify_list, &notify);
		else
			xenvif_put(vif);
		npo.meta_cons += sco->meta_slots_used;
		dev_kfree_skb(skb);
	}

	list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
		notify_remote_via_irq(vif->rx_irq);
		list_del_init(&vif->notify_list);
		xenvif_put(vif);
	}

	/* More work to do? */
	if (!skb_queue_empty(&netbk->rx_queue) &&
	    !timer_pending(&netbk->net_timer))
		xen_netbk_kick_thread(netbk);
}

void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
{
	struct xen_netbk *netbk = vif->netbk;

	skb_queue_tail(&netbk->rx_queue, skb);

	xen_netbk_kick_thread(netbk);
}

static void xen_netbk_alarm(unsigned long data)
{
	struct xen_netbk *netbk = (struct xen_netbk *)data;
	xen_netbk_kick_thread(netbk);
}

static int __on_net_schedule_list(struct xenvif *vif)
{
	return !list_empty(&vif->schedule_list);
}

/* Must be called with net_schedule_list_lock held */
static void remove_from_net_schedule_list(struct xenvif *vif)
{
	if (likely(__on_net_schedule_list(vif))) {
		list_del_init(&vif->schedule_list);
		xenvif_put(vif);
	}
}

static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
{
	struct xenvif *vif = NULL;

	spin_lock_irq(&netbk->net_schedule_list_lock);
	if (list_empty(&netbk->net_schedule_list))
		goto out;

	vif = list_first_entry(&netbk->net_schedule_list,
			       struct xenvif, schedule_list);
	if (!vif)
		goto out;

	xenvif_get(vif);

	remove_from_net_schedule_list(vif);
out:
	spin_unlock_irq(&netbk->net_schedule_list_lock);
	return vif;
}

void xen_netbk_schedule_xenvif(struct xenvif *vif)
{
	unsigned long flags;
	struct xen_netbk *netbk = vif->netbk;

	if (__on_net_schedule_list(vif))
		goto kick;

	spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
	if (!__on_net_schedule_list(vif) &&
	    likely(xenvif_schedulable(vif))) {
		list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
		xenvif_get(vif);
	}
	spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);

kick:
	smp_mb();
	if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
	    !list_empty(&netbk->net_schedule_list))
		xen_netbk_kick_thread(netbk);
}

void xen_netbk_deschedule_xenvif(struct xenvif *vif)
{
	struct xen_netbk *netbk = vif->netbk;
	spin_lock_irq(&netbk->net_schedule_list_lock);
	remove_from_net_schedule_list(vif);
	spin_unlock_irq(&netbk->net_schedule_list_lock);
}

void xen_netbk_check_rx_xenvif(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		xen_netbk_schedule_xenvif(vif);
}

static void tx_add_credit(struct xenvif *vif)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
	max_burst = min(max_burst, 131072UL);
	max_burst = max(max_burst, vif->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = vif->remaining_credit + vif->credit_bytes;
	if (max_credit < vif->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	vif->remaining_credit = min(max_credit, max_burst);
}
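
/*
 * Credit arithmetic example (illustrative): with credit_bytes == 10000,
 * remaining_credit == 2000 and a 60000-byte request at the ring head,
 * max_burst is min(60000, 131072) = 60000 and max_credit is
 * 2000 + 10000 = 12000, so remaining_credit becomes min(12000, 60000) =
 * 12000. Because the burst ceiling tracks the head request's size, credit
 * can keep accumulating across replenish periods until the jumbo packet
 * fits, instead of being capped at credit_bytes forever.
 */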

static void tx_credit_callback(unsigned long data)
{
	struct xenvif *vif = (struct xenvif *)data;
	tx_add_credit(vif);
	xen_netbk_check_rx_xenvif(vif);
}

static void netbk_tx_err(struct xenvif *vif,
			 struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = vif->tx.req_cons;

	do {
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		if (cons == end)
			break;
		txp = RING_GET_REQUEST(&vif->tx, cons++);
	} while (1);
	vif->tx.req_cons = cons;
	xen_netbk_check_rx_xenvif(vif);
	xenvif_put(vif);
}

static void netbk_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	xenvif_carrier_off(vif);
	xenvif_put(vif);
}

static int netbk_count_requests(struct xenvif *vif,
				struct xen_netif_tx_request *first,
				struct xen_netif_tx_request *txp,
				int work_to_do)
{
	RING_IDX cons = vif->tx.req_cons;
	int slots = 0;
	int drop_err = 0;
	int more_data;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		struct xen_netif_tx_request dropped_tx = { 0 };

		if (slots >= work_to_do) {
			netdev_err(vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			netbk_fatal_tx_err(vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and
		 * is considered malicious.
		 */
		if (unlikely(slots >= fatal_skb_slots)) {
			netdev_err(vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, fatal_skb_slots);
			netbk_fatal_tx_err(vif);
			return -E2BIG;
		}

		/* The Xen network protocol had an implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
		 * the historical MAX_SKB_FRAGS value 18 to honor the
		 * same behavior as before. Any packet using more than
		 * 18 slots but less than fatal_skb_slots slots is
		 * dropped.
		 */
		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
			if (net_ratelimit())
				netdev_dbg(vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
			drop_err = -E2BIG;
		}

		if (drop_err)
			txp = &dropped_tx;

		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
		       sizeof(*txp));

		/* If the guest submitted a frame >= 64 KiB then
		 * first->size overflowed and following slots will
		 * appear to be larger than the frame.
		 *
		 * This cannot be a fatal error as there are buggy
		 * frontends that do this.
		 *
		 * Consume all slots and drop the packet.
		 */
		if (!drop_err && txp->size > first->size) {
			if (net_ratelimit())
				netdev_dbg(vif->dev,
					   "Invalid tx request, slot size %u > remaining size %u\n",
					   txp->size, first->size);
			drop_err = -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
			netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
				   txp->offset, txp->size);
			netbk_fatal_tx_err(vif);
			return -EINVAL;
		}

		more_data = txp->flags & XEN_NETTXF_more_data;

		if (!drop_err)
			txp++;

	} while (more_data);

	if (drop_err) {
		netbk_tx_err(vif, first, cons + slots);
		return drop_err;
	}

	return slots;
}
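
/*
 * Slot-count policy in summary (values from the defaults above): a packet
 * spanning up to XEN_NETBK_LEGACY_SLOTS_MAX (18) slots is accepted; one
 * using 19 slots (>= 18 but < fatal_skb_slots == 20) is consumed and
 * dropped with error responses, keeping buggy-but-honest frontends alive;
 * and one claiming 20 or more slots trips the malicious threshold and takes
 * the whole vif offline via netbk_fatal_tx_err().
 */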

static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
					 u16 pending_idx)
{
	struct page *page;
	page = alloc_page(GFP_KERNEL|__GFP_COLD);
	if (!page)
		return NULL;
	set_page_ext(page, netbk, pending_idx);
	netbk->mmap_pages[pending_idx] = page;
	return page;
}

static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
						  struct xenvif *vif,
						  struct sk_buff *skb,
						  struct xen_netif_tx_request *txp,
						  struct gnttab_copy *gop)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = *((u16 *)skb->data);
	u16 head_idx = 0;
	int slot, start;
	struct page *page;
	pending_ring_idx_t index, start_idx = 0;
	uint16_t dst_offset;
	unsigned int nr_slots;
	struct pending_tx_info *first = NULL;

	/* At this point shinfo->nr_frags is in fact the number of
	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
	 */
	nr_slots = shinfo->nr_frags;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	/* Coalesce tx requests, at this point the packet passed in
	 * should be <= 64K. Any packets larger than 64K have been
	 * handled in netbk_count_requests().
	 */
	for (shinfo->nr_frags = slot = start; slot < nr_slots;
	     shinfo->nr_frags++) {
		struct pending_tx_info *pending_tx_info =
			netbk->pending_tx_info;

		page = alloc_page(GFP_KERNEL|__GFP_COLD);
		if (!page)
			goto err;

		dst_offset = 0;
		first = NULL;
		while (dst_offset < PAGE_SIZE && slot < nr_slots) {
			gop->flags = GNTCOPY_source_gref;

			gop->source.u.ref = txp->gref;
			gop->source.domid = vif->domid;
			gop->source.offset = txp->offset;

			gop->dest.domid = DOMID_SELF;

			gop->dest.offset = dst_offset;
			gop->dest.u.gmfn = virt_to_mfn(page_address(page));

			if (dst_offset + txp->size > PAGE_SIZE) {
				/* This page can only merge a portion
				 * of the tx request. Do not increment any
				 * pointer / counter here. The txp
				 * will be dealt with in future
				 * rounds, eventually hitting the
				 * `else` branch.
				 */
				gop->len = PAGE_SIZE - dst_offset;
				txp->offset += gop->len;
				txp->size -= gop->len;
				dst_offset += gop->len; /* quit loop */
			} else {
				/* This tx request can be merged in the page */
				gop->len = txp->size;
				dst_offset += gop->len;

				index = pending_index(netbk->pending_cons++);

				pending_idx = netbk->pending_ring[index];

				memcpy(&pending_tx_info[pending_idx].req, txp,
				       sizeof(*txp));
				xenvif_get(vif);

				pending_tx_info[pending_idx].vif = vif;

				/* Poison these fields, corresponding
				 * fields for head tx req will be set
				 * to correct values after the loop.
				 */
				netbk->mmap_pages[pending_idx] = (void *)(~0UL);
				pending_tx_info[pending_idx].head =
					INVALID_PENDING_RING_IDX;

				if (!first) {
					first = &pending_tx_info[pending_idx];
					start_idx = index;
					head_idx = pending_idx;
				}

				txp++;
				slot++;
			}

			gop++;
		}

		first->req.offset = 0;
		first->req.size = dst_offset;
		first->head = start_idx;
		set_page_ext(page, netbk, head_idx);
		netbk->mmap_pages[head_idx] = page;
		frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
	}

	BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);

	return gop;
err:
	/* Unwind, freeing all pages and sending error responses. */
	while (shinfo->nr_frags-- > start) {
		xen_netbk_idx_release(netbk,
				frag_get_pending_idx(&frags[shinfo->nr_frags]),
				XEN_NETIF_RSP_ERROR);
	}
	/* The head too, if necessary. */
	if (start)
		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

	return NULL;
}
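
/*
 * Coalescing example (illustrative, 4 KiB pages): suppose a frag chain of
 * three tx requests of 1500, 1500 and 800 bytes. All three fit into a single
 * freshly allocated page, so the loop emits three gnttab_copy operations
 * with dst_offset 0, 1500 and 3000, consumes three pending ring entries, and
 * the resulting skb frag covers bytes [0, 3800) of that one page. A request
 * that straddles the page boundary is instead split across two copy ops,
 * which is why tx_copy_ops is sized at 2*MAX_PENDING_REQS.
 */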

static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
				  struct sk_buff *skb,
				  struct gnttab_copy **gopp)
{
	struct gnttab_copy *gop = *gopp;
	u16 pending_idx = *((u16 *)skb->data);
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct pending_tx_info *tx_info;
	int nr_frags = shinfo->nr_frags;
	int i, err, start;
	u16 peek; /* peek into next tx request */

	/* Check status of header. */
	err = gop->status;
	if (unlikely(err))
		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (i = start; i < nr_frags; i++) {
		int j, newerr;
		pending_ring_idx_t head;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
		tx_info = &netbk->pending_tx_info[pending_idx];
		head = tx_info->head;

		/* Check error status: if okay then remember grant handle. */
		do {
			newerr = (++gop)->status;
			if (newerr)
				break;
			peek = netbk->pending_ring[pending_index(++head)];
		} while (!pending_tx_is_head(netbk, peek));

		if (likely(!newerr)) {
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err))
				xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: invalidate header and preceding fragments. */
		pending_idx = *((u16 *)skb->data);
		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
		for (j = start; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	*gopp = gop + 1;
	return err;
}

static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		txp = &netbk->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset xen_netbk_idx_release */
		get_page(netbk->mmap_pages[pending_idx]);
		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
	}
}

static int xen_netbk_get_extras(struct xenvif *vif,
				struct xen_netif_extra_info *extras,
				int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = vif->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(vif->dev, "Missing extra info\n");
			netbk_fatal_tx_err(vif);
			return -EBADR;
		}

		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
		       sizeof(extra));
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			vif->tx.req_cons = ++cons;
			netdev_err(vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			netbk_fatal_tx_err(vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
		vif->tx.req_cons = ++cons;
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}

static int netbk_set_skb_gso(struct xenvif *vif,
			     struct sk_buff *skb,
			     struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		netbk_fatal_tx_err(vif);
		return -EINVAL;
	}

	/* Currently only TCPv4 S.O. is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		netbk_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}

static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
{
	struct iphdr *iph;
	int err = -EPROTO;
	int recalculate_partial_csum = 0;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		vif->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = 1;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	iph = (void *)skb->data;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
					  offsetof(struct tcphdr, check)))
			goto out;

		if (recalculate_partial_csum) {
			struct tcphdr *tcph = tcp_hdr(skb);
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_TCP, 0);
		}
		break;
	case IPPROTO_UDP:
		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
					  offsetof(struct udphdr, check)))
			goto out;

		if (recalculate_partial_csum) {
			struct udphdr *udph = udp_hdr(skb);
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_UDP, 0);
		}
		break;
	default:
		if (net_ratelimit())
			netdev_err(vif->dev,
				   "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
				   iph->protocol);
		goto out;
	}

	err = 0;

out:
	return err;
}

static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
	unsigned long now = jiffies;
	unsigned long next_credit =
		vif->credit_timeout.expires +
		msecs_to_jiffies(vif->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&vif->credit_timeout))
		return true;

	/* Passed the point where we can replenish credit? */
	if (time_after_eq(now, next_credit)) {
		vif->credit_timeout.expires = now;
		tx_add_credit(vif);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > vif->remaining_credit) {
		vif->credit_timeout.data = (unsigned long)vif;
		vif->credit_timeout.function = tx_credit_callback;
		mod_timer(&vif->credit_timeout, next_credit);

		return true;
	}

	return false;
}
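
/*
 * Timeline example (illustrative, credit_usec == 100000, i.e. a 100 ms
 * replenish period): a request arrives 40 ms after the last replenish and
 * exceeds remaining_credit. next_credit is still 60 ms away, so the credit
 * timer is armed for that moment and the packet waits; when the timer fires,
 * tx_credit_callback() tops the credit back up and reschedules the vif. Had
 * the request arrived 110 ms after the last replenish instead, credit would
 * be added immediately and the packet sent on the spot (if it then fits).
 */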
1428 | ||
1429 | static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) | |
1430 | { | |
1431 | struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop; | |
1432 | struct sk_buff *skb; | |
1433 | int ret; | |
1434 | ||
37641494 | 1435 | while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX |
2810e5b9 | 1436 | < MAX_PENDING_REQS) && |
f942dc25 IC |
1437 | !list_empty(&netbk->net_schedule_list)) { |
1438 | struct xenvif *vif; | |
1439 | struct xen_netif_tx_request txreq; | |
37641494 | 1440 | struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; |
f942dc25 IC |
1441 | struct page *page; |
1442 | struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; | |
1443 | u16 pending_idx; | |
1444 | RING_IDX idx; | |
1445 | int work_to_do; | |
1446 | unsigned int data_len; | |
1447 | pending_ring_idx_t index; | |
1448 | ||
1449 | /* Get a netif from the list with work to do. */ | |
1450 | vif = poll_net_schedule_list(netbk); | |
48856286 IC |
1451 | /* This can sometimes happen because the test of |
1452 | * list_empty(net_schedule_list) at the top of the | |
1453 | * loop is unlocked. Just go back and have another | |
1454 | * look. | |
1455 | */ | |
f942dc25 IC |
1456 | if (!vif) |
1457 | continue; | |
1458 | ||
48856286 IC |
1459 | if (vif->tx.sring->req_prod - vif->tx.req_cons > |
1460 | XEN_NETIF_TX_RING_SIZE) { | |
1461 | netdev_err(vif->dev, | |
1462 | "Impossible number of requests. " | |
1463 | "req_prod %d, req_cons %d, size %ld\n", | |
1464 | vif->tx.sring->req_prod, vif->tx.req_cons, | |
1465 | XEN_NETIF_TX_RING_SIZE); | |
1466 | netbk_fatal_tx_err(vif); | |
1467 | continue; | |
1468 | } | |
1469 | ||
f942dc25 IC |
1470 | RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); |
1471 | if (!work_to_do) { | |
1472 | xenvif_put(vif); | |
1473 | continue; | |
1474 | } | |
1475 | ||
1476 | idx = vif->tx.req_cons; | |
1477 | rmb(); /* Ensure that we see the request before we copy it. */ | |
1478 | memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq)); | |
1479 | ||
1480 | /* Credit-based scheduling. */ | |
1481 | if (txreq.size > vif->remaining_credit && | |
1482 | tx_credit_exceeded(vif, txreq.size)) { | |
1483 | xenvif_put(vif); | |
1484 | continue; | |
1485 | } | |
1486 | ||
1487 | vif->remaining_credit -= txreq.size; | |
1488 | ||
1489 | work_to_do--; | |
1490 | vif->tx.req_cons = ++idx; | |
1491 | ||
1492 | memset(extras, 0, sizeof(extras)); | |
1493 | if (txreq.flags & XEN_NETTXF_extra_info) { | |
1494 | work_to_do = xen_netbk_get_extras(vif, extras, | |
1495 | work_to_do); | |
1496 | idx = vif->tx.req_cons; | |
48856286 | 1497 | if (unlikely(work_to_do < 0)) |
f942dc25 | 1498 | continue; |
f942dc25 IC |
1499 | } |
1500 | ||
ac69c26e | 1501 | ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); |
48856286 | 1502 | if (unlikely(ret < 0)) |
f942dc25 | 1503 | continue; |
48856286 | 1504 | |
f942dc25 IC |
1505 | idx += ret; |
1506 | ||
1507 | if (unlikely(txreq.size < ETH_HLEN)) { | |
1508 | netdev_dbg(vif->dev, | |
1509 | "Bad packet size: %d\n", txreq.size); | |
1510 | netbk_tx_err(vif, &txreq, idx); | |
1511 | continue; | |
1512 | } | |
1513 | ||
1514 | /* No crossing a page as the payload mustn't fragment. */ | |
1515 | if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { | |
48856286 | 1516 | netdev_err(vif->dev, |
f942dc25 IC |
1517 | "txreq.offset: %x, size: %u, end: %lu\n", |
1518 | txreq.offset, txreq.size, | |
1519 | (txreq.offset&~PAGE_MASK) + txreq.size); | |
48856286 | 1520 | netbk_fatal_tx_err(vif); |
f942dc25 IC |
1521 | continue; |
1522 | } | |
1523 | ||
1524 | index = pending_index(netbk->pending_cons); | |
1525 | pending_idx = netbk->pending_ring[index]; | |
1526 | ||
1527 | data_len = (txreq.size > PKT_PROT_LEN && | |
37641494 | 1528 | ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? |
f942dc25 IC |
1529 | PKT_PROT_LEN : txreq.size; |
1530 | ||
1531 | skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN, | |
1532 | GFP_ATOMIC | __GFP_NOWARN); | |
1533 | if (unlikely(skb == NULL)) { | |
1534 | netdev_dbg(vif->dev, | |
1535 | "Can't allocate a skb in start_xmit.\n"); | |
1536 | netbk_tx_err(vif, &txreq, idx); | |
1537 | break; | |
1538 | } | |
1539 | ||
1540 | /* Packets passed to netif_rx() must have some headroom. */ | |
1541 | skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); | |
1542 | ||
1543 | if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { | |
1544 | struct xen_netif_extra_info *gso; | |
1545 | gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; | |
1546 | ||
1547 | if (netbk_set_skb_gso(vif, skb, gso)) { | |
48856286 | 1548 | /* Failure in netbk_set_skb_gso is fatal. */ |
f942dc25 | 1549 | kfree_skb(skb); |
f942dc25 IC |
1550 | continue; |
1551 | } | |
1552 | } | |
1553 | ||
1554 | /* XXX could copy straight to head */ | |
27f85228 | 1555 | page = xen_netbk_alloc_page(netbk, pending_idx); |
f942dc25 IC |
1556 | if (!page) { |
1557 | kfree_skb(skb); | |
1558 | netbk_tx_err(vif, &txreq, idx); | |
1559 | continue; | |
1560 | } | |
1561 | ||
f942dc25 IC |
1562 | gop->source.u.ref = txreq.gref; |
1563 | gop->source.domid = vif->domid; | |
1564 | gop->source.offset = txreq.offset; | |
1565 | ||
1566 | gop->dest.u.gmfn = virt_to_mfn(page_address(page)); | |
1567 | gop->dest.domid = DOMID_SELF; | |
1568 | gop->dest.offset = txreq.offset; | |
1569 | ||
1570 | gop->len = txreq.size; | |
1571 | gop->flags = GNTCOPY_source_gref; | |
1572 | ||
1573 | gop++; | |
1574 | ||
1575 | memcpy(&netbk->pending_tx_info[pending_idx].req, | |
1576 | &txreq, sizeof(txreq)); | |
1577 | netbk->pending_tx_info[pending_idx].vif = vif; | |
2810e5b9 | 1578 | netbk->pending_tx_info[pending_idx].head = index; |
f942dc25 IC |
1579 | *((u16 *)skb->data) = pending_idx; |
1580 | ||
1581 | __skb_put(skb, data_len); | |
1582 | ||
1583 | skb_shinfo(skb)->nr_frags = ret; | |
1584 | if (data_len < txreq.size) { | |
1585 | skb_shinfo(skb)->nr_frags++; | |
ea066ad1 IC |
1586 | frag_set_pending_idx(&skb_shinfo(skb)->frags[0], |
1587 | pending_idx); | |
f942dc25 | 1588 | } else { |
ea066ad1 IC |
1589 | frag_set_pending_idx(&skb_shinfo(skb)->frags[0], |
1590 | INVALID_PENDING_IDX); | |
f942dc25 IC |
1591 | } |
1592 | ||
f942dc25 IC |
1593 | netbk->pending_cons++; |
1594 | ||
1595 | request_gop = xen_netbk_get_requests(netbk, vif, | |
1596 | skb, txfrags, gop); | |
1597 | if (request_gop == NULL) { | |
1598 | kfree_skb(skb); | |
1599 | netbk_tx_err(vif, &txreq, idx); | |
1600 | continue; | |
1601 | } | |
1602 | gop = request_gop; | |
1603 | ||
1e0b6eac AL |
1604 | __skb_queue_tail(&netbk->tx_queue, skb); |
1605 | ||
f942dc25 IC |
1606 | vif->tx.req_cons = idx; |
1607 | xen_netbk_check_rx_xenvif(vif); | |
1608 | ||
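| /* Stop building once the copy-op array is full; unconsumed | |
| * requests stay on the ring for the next pass. */ | |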
1609 | if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops)) | |
1610 | break; | |
1611 | } | |
1612 | ||
1613 | return gop - netbk->tx_copy_ops; | |
1614 | } | |
1615 | ||
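| /* Second half of the TX path: runs after the grant-copy batch has | |
| * completed, verifies each copy, assembles the skb and passes it | |
| * up the stack. */ | |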
1616 | static void xen_netbk_tx_submit(struct xen_netbk *netbk) | |
1617 | { | |
1618 | struct gnttab_copy *gop = netbk->tx_copy_ops; | |
1619 | struct sk_buff *skb; | |
1620 | ||
1621 | while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) { | |
1622 | struct xen_netif_tx_request *txp; | |
1623 | struct xenvif *vif; | |
1624 | u16 pending_idx; | |
1625 | unsigned data_len; | |
1626 | ||
1627 | pending_idx = *((u16 *)skb->data); | |
1628 | vif = netbk->pending_tx_info[pending_idx].vif; | |
1629 | txp = &netbk->pending_tx_info[pending_idx].req; | |
1630 | ||
1631 | /* Check the grant copy status of each op for this skb. */ | |
1632 | if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) { | |
1633 | netdev_dbg(vif->dev, "netback grant failed.\n"); | |
1634 | skb_shinfo(skb)->nr_frags = 0; | |
1635 | kfree_skb(skb); | |
1636 | continue; | |
1637 | } | |
1638 | ||
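| /* The grant copy has landed the head data in a local page; pull | |
| * it into the skb's linear area. */ | |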
1639 | data_len = skb->len; | |
1640 | memcpy(skb->data, | |
1641 | (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset), | |
1642 | data_len); | |
1643 | if (data_len < txp->size) { | |
1644 | /* Append the packet payload as a fragment. */ | |
1645 | txp->offset += data_len; | |
1646 | txp->size -= data_len; | |
1647 | } else { | |
1648 | /* Schedule a response immediately. */ | |
7d5145d8 | 1649 | xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); |
f942dc25 IC |
1650 | } |
1651 | ||
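| /* Translate ring checksum flags: csum_blank means the guest left | |
| * the checksum for us to complete; data_validated means it has | |
| * already been verified. */ | |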
1652 | if (txp->flags & XEN_NETTXF_csum_blank) | |
1653 | skb->ip_summed = CHECKSUM_PARTIAL; | |
1654 | else if (txp->flags & XEN_NETTXF_data_validated) | |
1655 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1656 | ||
1657 | xen_netbk_fill_frags(netbk, skb); | |
1658 | ||
1659 | /* | |
1660 | * If the initial fragment was < PKT_PROT_LEN then | |
1661 | * pull through some bytes from the other fragments to | |
1662 | * increase the linear region to PKT_PROT_LEN bytes. | |
1663 | */ | |
1664 | if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) { | |
1665 | int target = min_t(int, skb->len, PKT_PROT_LEN); | |
1666 | __pskb_pull_tail(skb, target - skb_headlen(skb)); | |
1667 | } | |
1668 | ||
1669 | skb->dev = vif->dev; | |
1670 | skb->protocol = eth_type_trans(skb, skb->dev); | |
f9ca8f74 | 1671 | skb_reset_network_header(skb); |
f942dc25 IC |
1672 | |
1673 | if (checksum_setup(vif, skb)) { | |
1674 | netdev_dbg(vif->dev, | |
1675 | "Can't set up checksum in net_tx_action\n"); | |
1676 | kfree_skb(skb); | |
1677 | continue; | |
1678 | } | |
1679 | ||
40893fd0 | 1680 | skb_probe_transport_header(skb, 0); |
f9ca8f74 | 1681 | |
f942dc25 IC |
1682 | vif->dev->stats.rx_bytes += skb->len; |
1683 | vif->dev->stats.rx_packets++; | |
1684 | ||
1685 | xenvif_receive_skb(vif, skb); | |
1686 | } | |
1687 | } | |
1688 | ||
1689 | /* Called after netfront has queued transmit requests: build the | |
| * grant-copy batch, execute it, then complete the packets. */ | |
1690 | static void xen_netbk_tx_action(struct xen_netbk *netbk) | |
1691 | { | |
1692 | unsigned nr_gops; | |
f942dc25 IC |
1693 | |
1694 | nr_gops = xen_netbk_tx_build_gops(netbk); | |
1695 | ||
1696 | if (nr_gops == 0) | |
1697 | return; | |
f942dc25 | 1698 | |
c571898f | 1699 | gnttab_batch_copy(netbk->tx_copy_ops, nr_gops); |
f942dc25 | 1700 | |
c571898f | 1701 | xen_netbk_tx_submit(netbk); |
f942dc25 IC |
1702 | } |
1703 | ||
7d5145d8 MD |
1704 | static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, |
1705 | u8 status) | |
f942dc25 IC |
1706 | { |
1707 | struct xenvif *vif; | |
1708 | struct pending_tx_info *pending_tx_info; | |
2810e5b9 WL |
1709 | pending_ring_idx_t head; |
1710 | u16 peek; /* peek into next tx request */ | |
1711 | ||
1712 | BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL)); | |
f942dc25 IC |
1713 | |
1714 | /* Already complete? */ | |
1715 | if (netbk->mmap_pages[pending_idx] == NULL) | |
1716 | return; | |
1717 | ||
1718 | pending_tx_info = &netbk->pending_tx_info[pending_idx]; | |
1719 | ||
1720 | vif = pending_tx_info->vif; | |
2810e5b9 | 1721 | head = pending_tx_info->head; |
f942dc25 | 1722 | |
2810e5b9 WL |
1723 | BUG_ON(!pending_tx_is_head(netbk, head)); |
1724 | BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx); | |
f942dc25 | 1725 | |
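| /* Respond to every slot of this packet: walk the pending ring | |
| * from the head slot until the next head slot is reached. */ | |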
2810e5b9 WL |
1726 | do { |
1727 | pending_ring_idx_t index; | |
1728 | pending_ring_idx_t idx = pending_index(head); | |
1729 | u16 info_idx = netbk->pending_ring[idx]; | |
f942dc25 | 1730 | |
2810e5b9 WL |
1731 | pending_tx_info = &netbk->pending_tx_info[info_idx]; |
1732 | make_tx_response(vif, &pending_tx_info->req, status); | |
1733 | ||
1734 | /* Setting any number other than | |
1735 | * INVALID_PENDING_RING_IDX indicates this slot is | |
1736 | * starting a new packet / ending a previous packet. | |
1737 | */ | |
1738 | pending_tx_info->head = 0; | |
1739 | ||
1740 | index = pending_index(netbk->pending_prod++); | |
1741 | netbk->pending_ring[index] = netbk->pending_ring[info_idx]; | |
f942dc25 | 1742 | |
2810e5b9 WL |
1743 | xenvif_put(vif); |
1744 | ||
1745 | peek = netbk->pending_ring[pending_index(++head)]; | |
1746 | ||
1747 | } while (!pending_tx_is_head(netbk, peek)); | |
1748 | ||
1749 | netbk->mmap_pages[pending_idx]->mapping = 0; | |
f942dc25 IC |
1750 | put_page(netbk->mmap_pages[pending_idx]); |
1751 | netbk->mmap_pages[pending_idx] = NULL; | |
1752 | } | |
1753 | ||
2810e5b9 | 1754 | |
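| /* Put a single response on the TX ring (plus a null response for | |
| * an extra-info slot) and notify the frontend if it is waiting. */ | |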
f942dc25 IC |
1755 | static void make_tx_response(struct xenvif *vif, |
1756 | struct xen_netif_tx_request *txp, | |
1757 | s8 st) | |
1758 | { | |
1759 | RING_IDX i = vif->tx.rsp_prod_pvt; | |
1760 | struct xen_netif_tx_response *resp; | |
1761 | int notify; | |
1762 | ||
1763 | resp = RING_GET_RESPONSE(&vif->tx, i); | |
1764 | resp->id = txp->id; | |
1765 | resp->status = st; | |
1766 | ||
1767 | if (txp->flags & XEN_NETTXF_extra_info) | |
1768 | RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL; | |
1769 | ||
1770 | vif->tx.rsp_prod_pvt = ++i; | |
1771 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify); | |
1772 | if (notify) | |
e1f00a69 | 1773 | notify_remote_via_irq(vif->tx_irq); |
f942dc25 IC |
1774 | } |
1775 | ||
1776 | static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, | |
1777 | u16 id, | |
1778 | s8 st, | |
1779 | u16 offset, | |
1780 | u16 size, | |
1781 | u16 flags) | |
1782 | { | |
1783 | RING_IDX i = vif->rx.rsp_prod_pvt; | |
1784 | struct xen_netif_rx_response *resp; | |
1785 | ||
1786 | resp = RING_GET_RESPONSE(&vif->rx, i); | |
1787 | resp->offset = offset; | |
1788 | resp->flags = flags; | |
1789 | resp->id = id; | |
1790 | resp->status = (s16)size; | |
1791 | if (st < 0) | |
1792 | resp->status = (s16)st; | |
1793 | ||
1794 | vif->rx.rsp_prod_pvt = ++i; | |
1795 | ||
1796 | return resp; | |
1797 | } | |
1798 | ||
1799 | static inline int rx_work_todo(struct xen_netbk *netbk) | |
1800 | { | |
1801 | return !skb_queue_empty(&netbk->rx_queue); | |
1802 | } | |
1803 | ||
1804 | static inline int tx_work_todo(struct xen_netbk *netbk) | |
1805 | { | |
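| /* TX work requires both headroom for a worst-case packet's slots | |
| * and at least one vif on the schedule list. */ | |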
37641494 | 1807 | if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX |
2810e5b9 WL |
1808 | < MAX_PENDING_REQS) && |
1809 | !list_empty(&netbk->net_schedule_list)) | |
f942dc25 IC |
1810 | return 1; |
1811 | ||
1812 | return 0; | |
1813 | } | |
1814 | ||
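| /* Per-group worker thread: sleep until RX or TX work (or a stop | |
| * request) arrives, then service both directions. */ | |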
1815 | static int xen_netbk_kthread(void *data) | |
1816 | { | |
1817 | struct xen_netbk *netbk = data; | |
1818 | while (!kthread_should_stop()) { | |
1819 | wait_event_interruptible(netbk->wq, | |
1820 | rx_work_todo(netbk) || | |
1821 | tx_work_todo(netbk) || | |
1822 | kthread_should_stop()); | |
1823 | cond_resched(); | |
1824 | ||
1825 | if (kthread_should_stop()) | |
1826 | break; | |
1827 | ||
1828 | if (rx_work_todo(netbk)) | |
1829 | xen_netbk_rx_action(netbk); | |
1830 | ||
1831 | if (tx_work_todo(netbk)) | |
1832 | xen_netbk_tx_action(netbk); | |
1833 | } | |
1834 | ||
1835 | return 0; | |
1836 | } | |
1837 | ||
1838 | void xen_netbk_unmap_frontend_rings(struct xenvif *vif) | |
1839 | { | |
c9d63699 DV |
1840 | if (vif->tx.sring) |
1841 | xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), | |
1842 | vif->tx.sring); | |
1843 | if (vif->rx.sring) | |
1844 | xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), | |
1845 | vif->rx.sring); | |
f942dc25 IC |
1846 | } |
1847 | ||
1848 | int xen_netbk_map_frontend_rings(struct xenvif *vif, | |
1849 | grant_ref_t tx_ring_ref, | |
1850 | grant_ref_t rx_ring_ref) | |
1851 | { | |
c9d63699 | 1852 | void *addr; |
f942dc25 IC |
1853 | struct xen_netif_tx_sring *txs; |
1854 | struct xen_netif_rx_sring *rxs; | |
1855 | ||
1856 | int err = -ENOMEM; | |
1857 | ||
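| /* Map the two shared ring pages granted by the frontend and set | |
| * up the back-ring views over them. */ | |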
c9d63699 DV |
1858 | err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif), |
1859 | tx_ring_ref, &addr); | |
1860 | if (err) | |
f942dc25 IC |
1861 | goto err; |
1862 | ||
c9d63699 | 1863 | txs = (struct xen_netif_tx_sring *)addr; |
f942dc25 IC |
1864 | BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE); |
1865 | ||
c9d63699 DV |
1866 | err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif), |
1867 | rx_ring_ref, &addr); | |
1868 | if (err) | |
f942dc25 | 1869 | goto err; |
f942dc25 | 1870 | |
c9d63699 | 1871 | rxs = (struct xen_netif_rx_sring *)addr; |
f942dc25 IC |
1872 | BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE); |
1873 | ||
c9d63699 DV |
1874 | vif->rx_req_cons_peek = 0; |
1875 | ||
f942dc25 IC |
1876 | return 0; |
1877 | ||
1878 | err: | |
1879 | xen_netbk_unmap_frontend_rings(vif); | |
1880 | return err; | |
1881 | } | |
1882 | ||
1883 | static int __init netback_init(void) | |
1884 | { | |
1885 | int i; | |
1886 | int rc = 0; | |
1887 | int group; | |
1888 | ||
2a14b244 | 1889 | if (!xen_domain()) |
f942dc25 IC |
1890 | return -ENODEV; |
1891 | ||
37641494 | 1892 | if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) { |
2810e5b9 | 1893 | printk(KERN_INFO |
37641494 WL |
1894 | "xen-netback: fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n", |
1895 | fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX); | |
1896 | fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX; | |
2810e5b9 WL |
1897 | } |
1898 | ||
f942dc25 IC |
1899 | xen_netbk_group_nr = num_online_cpus(); |
1900 | xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr); | |
e404decb | 1901 | if (!xen_netbk) |
f942dc25 | 1902 | return -ENOMEM; |
f942dc25 IC |
1903 | |
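| /* One xen_netbk group per online CPU: each gets its own queues, | |
| * timer, pending ring and a kthread bound to that CPU. */ | |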
1904 | for (group = 0; group < xen_netbk_group_nr; group++) { | |
1905 | struct xen_netbk *netbk = &xen_netbk[group]; | |
1906 | skb_queue_head_init(&netbk->rx_queue); | |
1907 | skb_queue_head_init(&netbk->tx_queue); | |
1908 | ||
1909 | init_timer(&netbk->net_timer); | |
1910 | netbk->net_timer.data = (unsigned long)netbk; | |
1911 | netbk->net_timer.function = xen_netbk_alarm; | |
1912 | ||
1913 | netbk->pending_cons = 0; | |
1914 | netbk->pending_prod = MAX_PENDING_REQS; | |
1915 | for (i = 0; i < MAX_PENDING_REQS; i++) | |
1916 | netbk->pending_ring[i] = i; | |
1917 | ||
1918 | init_waitqueue_head(&netbk->wq); | |
1919 | netbk->task = kthread_create(xen_netbk_kthread, | |
1920 | (void *)netbk, | |
1921 | "netback/%u", group); | |
1922 | ||
1923 | if (IS_ERR(netbk->task)) { | |
6b84bd16 | 1924 | printk(KERN_ALERT "kthread_create() failed for netback\n"); | |
f942dc25 IC |
1925 | del_timer(&netbk->net_timer); |
1926 | rc = PTR_ERR(netbk->task); | |
1927 | goto failed_init; | |
1928 | } | |
1929 | ||
1930 | kthread_bind(netbk->task, group); | |
1931 | ||
1932 | INIT_LIST_HEAD(&netbk->net_schedule_list); | |
1933 | ||
1934 | spin_lock_init(&netbk->net_schedule_list_lock); | |
1935 | ||
1936 | atomic_set(&netbk->netfront_count, 0); | |
1937 | ||
1938 | wake_up_process(netbk->task); | |
1939 | } | |
1940 | ||
1941 | rc = xenvif_xenbus_init(); | |
1942 | if (rc) | |
1943 | goto failed_init; | |
1944 | ||
1945 | return 0; | |
1946 | ||
1947 | failed_init: | |
1948 | while (--group >= 0) { | |
1949 | struct xen_netbk *netbk = &xen_netbk[group]; | |
f942dc25 IC |
1950 | del_timer(&netbk->net_timer); |
1951 | kthread_stop(netbk->task); | |
1952 | } | |
1953 | vfree(xen_netbk); | |
1954 | return rc; | |
1956 | } | |
1957 | ||
1958 | module_init(netback_init); | |
1959 | ||
b103f358 WL |
1960 | static void __exit netback_fini(void) |
1961 | { | |
1962 | int i, j; | |
1963 | ||
1964 | xenvif_xenbus_fini(); | |
1965 | ||
1966 | for (i = 0; i < xen_netbk_group_nr; i++) { | |
1967 | struct xen_netbk *netbk = &xen_netbk[i]; | |
1968 | del_timer_sync(&netbk->net_timer); | |
1969 | kthread_stop(netbk->task); | |
1970 | for (j = 0; j < MAX_PENDING_REQS; j++) { | |
07cc61bf DC |
1971 | if (netbk->mmap_pages[j]) |
1972 | __free_page(netbk->mmap_pages[j]); | |
b103f358 WL |
1973 | } |
1974 | } | |
1975 | ||
1976 | vfree(xen_netbk); | |
1977 | } | |
1978 | module_exit(netback_fini); | |
1979 | ||
f942dc25 | 1980 | MODULE_LICENSE("Dual BSD/GPL"); |
f984cec6 | 1981 | MODULE_ALIAS("xen-backend:vif"); |