/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
31 | ||
32 | #include <linux/module.h> | |
33 | #include <linux/kernel.h> | |
34 | #include <linux/netdevice.h> | |
35 | #include <linux/etherdevice.h> | |
36 | #include <linux/skbuff.h> | |
37 | #include <linux/ethtool.h> | |
38 | #include <linux/if_ether.h> | |
39 | #include <linux/tcp.h> | |
40 | #include <linux/udp.h> | |
41 | #include <linux/moduleparam.h> | |
42 | #include <linux/mm.h> | |
43 | #include <net/ip.h> | |
44 | ||
45 | #include <xen/xenbus.h> | |
46 | #include <xen/events.h> | |
47 | #include <xen/page.h> | |
48 | #include <xen/grant_table.h> | |
49 | ||
50 | #include <xen/interface/io/netif.h> | |
51 | #include <xen/interface/memory.h> | |
52 | #include <xen/interface/grant_table.h> | |
53 | ||
static struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
        struct page *page;
        unsigned offset;
};

#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF 0

#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)

struct netfront_info {
        struct list_head list;
        struct net_device *netdev;

        struct napi_struct napi;
        struct net_device_stats stats;

        struct xen_netif_tx_front_ring tx;
        struct xen_netif_rx_front_ring rx;

        spinlock_t tx_lock;
        spinlock_t rx_lock;

        unsigned int evtchn;

        /* Receive-ring batched refills. */
#define RX_MIN_TARGET 8
#define RX_DFL_MIN_TARGET 64
#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
        unsigned rx_min_target, rx_max_target, rx_target;
        struct sk_buff_head rx_batch;

        struct timer_list rx_refill_timer;

        /*
         * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
         * are linked from tx_skb_freelist through skb_entry.link.
         *
         * NB. Freelist index entries are always going to be less than
         * PAGE_OFFSET, whereas pointers to skbs will always be equal or
         * greater than PAGE_OFFSET: we use this property to distinguish
         * them.
         */
        union skb_entry {
                struct sk_buff *skb;
                unsigned link;
        } tx_skbs[NET_TX_RING_SIZE];
        grant_ref_t gref_tx_head;
        grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
        unsigned tx_skb_freelist;

        struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
        grant_ref_t gref_rx_head;
        grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

        struct xenbus_device *xbdev;
        int tx_ring_ref;
        int rx_ring_ref;

        unsigned long rx_pfn_array[NET_RX_RING_SIZE];
        struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
        struct mmu_update rx_mmu[NET_RX_RING_SIZE];
};

struct netfront_rx_info {
        struct xen_netif_rx_response rx;
        struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

/*
 * Helpers for acquiring/freeing slots in tx_skbs[].
 */

static void add_id_to_freelist(unsigned *head, union skb_entry *list,
                               unsigned short id)
{
        list[id].link = *head;
        *head = id;
}

static unsigned short get_id_from_freelist(unsigned *head,
                                           union skb_entry *list)
{
        unsigned int id = *head;
        *head = list[id].link;
        return id;
}

static int xennet_rxidx(RING_IDX idx)
{
        return idx & (NET_RX_RING_SIZE - 1);
}

static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
                                         RING_IDX ri)
{
        int i = xennet_rxidx(ri);
        struct sk_buff *skb = np->rx_skbs[i];
        np->rx_skbs[i] = NULL;
        return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
                                     RING_IDX ri)
{
        int i = xennet_rxidx(ri);
        grant_ref_t ref = np->grant_rx_ref[i];
        np->grant_rx_ref[i] = GRANT_INVALID_REF;
        return ref;
}

#ifdef CONFIG_SYSFS
static int xennet_sysfs_addif(struct net_device *netdev);
static void xennet_sysfs_delif(struct net_device *netdev);
#else /* !CONFIG_SYSFS */
#define xennet_sysfs_addif(dev) (0)
#define xennet_sysfs_delif(dev) do { } while (0)
#endif

static int xennet_can_sg(struct net_device *dev)
{
        return dev->features & NETIF_F_SG;
}

static void rx_refill_timeout(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct netfront_info *np = netdev_priv(dev);
        netif_rx_schedule(dev, &np->napi);
}

static int netfront_tx_slot_available(struct netfront_info *np)
{
        return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
                (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
}

static void xennet_maybe_wake_tx(struct net_device *dev)
{
        struct netfront_info *np = netdev_priv(dev);

        if (unlikely(netif_queue_stopped(dev)) &&
            netfront_tx_slot_available(np) &&
            likely(netif_running(dev)))
                netif_wake_queue(dev);
}

static void xennet_alloc_rx_buffers(struct net_device *dev)
{
        unsigned short id;
        struct netfront_info *np = netdev_priv(dev);
        struct sk_buff *skb;
        struct page *page;
        int i, batch_target, notify;
        RING_IDX req_prod = np->rx.req_prod_pvt;
        grant_ref_t ref;
        unsigned long pfn;
        void *vaddr;
        struct xen_netif_rx_request *req;

        if (unlikely(!netif_carrier_ok(dev)))
                return;

        /*
         * Allocate skbuffs greedily, even though we batch updates to the
         * receive ring. This creates a less bursty demand on the memory
         * allocator, so should reduce the chance of failed allocation requests
         * both for ourself and for other kernel subsystems.
         */
        batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
        for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
                skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD,
                                         GFP_ATOMIC | __GFP_NOWARN);
                if (unlikely(!skb))
                        goto no_skb;

                page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
                if (!page) {
                        kfree_skb(skb);
no_skb:
                        /* Any skbuffs queued for refill? Force them out. */
                        if (i != 0)
                                goto refill;
                        /* Could not allocate any skbuffs. Try again later. */
                        mod_timer(&np->rx_refill_timer,
                                  jiffies + (HZ/10));
                        break;
                }

                skb_shinfo(skb)->frags[0].page = page;
                skb_shinfo(skb)->nr_frags = 1;
                __skb_queue_tail(&np->rx_batch, skb);
        }

        /* Is the batch large enough to be worthwhile? */
        if (i < (np->rx_target/2)) {
                if (req_prod > np->rx.sring->req_prod)
                        goto push;
                return;
        }

        /* Adjust our fill target if we risked running out of buffers. */
        if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
            ((np->rx_target *= 2) > np->rx_max_target))
                np->rx_target = np->rx_max_target;

refill:
        for (i = 0; ; i++) {
                skb = __skb_dequeue(&np->rx_batch);
                if (skb == NULL)
                        break;

                skb->dev = dev;

                id = xennet_rxidx(req_prod + i);

                BUG_ON(np->rx_skbs[id]);
                np->rx_skbs[id] = skb;

                ref = gnttab_claim_grant_reference(&np->gref_rx_head);
                BUG_ON((signed short)ref < 0);
                np->grant_rx_ref[id] = ref;

                pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
                vaddr = page_address(skb_shinfo(skb)->frags[0].page);

                req = RING_GET_REQUEST(&np->rx, req_prod + i);
                gnttab_grant_foreign_access_ref(ref,
                                                np->xbdev->otherend_id,
                                                pfn_to_mfn(pfn),
                                                0);

                req->id = id;
                req->gref = ref;
        }

        wmb();          /* barrier so backend sees requests */

        np->rx.req_prod_pvt = req_prod + i;
push:
        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
        if (notify)
                notify_remote_via_irq(np->netdev->irq);
}
307 | ||
308 | static int xennet_open(struct net_device *dev) | |
309 | { | |
310 | struct netfront_info *np = netdev_priv(dev); | |
311 | ||
312 | memset(&np->stats, 0, sizeof(np->stats)); | |
313 | ||
bea3348e SH |
314 | napi_enable(&np->napi); |
315 | ||
0d160211 JF |
316 | spin_lock_bh(&np->rx_lock); |
317 | if (netif_carrier_ok(dev)) { | |
318 | xennet_alloc_rx_buffers(dev); | |
319 | np->rx.sring->rsp_event = np->rx.rsp_cons + 1; | |
320 | if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) | |
bea3348e | 321 | netif_rx_schedule(dev, &np->napi); |
0d160211 JF |
322 | } |
323 | spin_unlock_bh(&np->rx_lock); | |
324 | ||
325 | xennet_maybe_wake_tx(dev); | |
326 | ||
327 | return 0; | |
328 | } | |
329 | ||
static void xennet_tx_buf_gc(struct net_device *dev)
{
        RING_IDX cons, prod;
        unsigned short id;
        struct netfront_info *np = netdev_priv(dev);
        struct sk_buff *skb;

        BUG_ON(!netif_carrier_ok(dev));

        do {
                prod = np->tx.sring->rsp_prod;
                rmb(); /* Ensure we see responses up to 'rp'. */

                for (cons = np->tx.rsp_cons; cons != prod; cons++) {
                        struct xen_netif_tx_response *txrsp;

                        txrsp = RING_GET_RESPONSE(&np->tx, cons);
                        if (txrsp->status == NETIF_RSP_NULL)
                                continue;

                        id = txrsp->id;
                        skb = np->tx_skbs[id].skb;
                        if (unlikely(gnttab_query_foreign_access(
                                np->grant_tx_ref[id]) != 0)) {
                                printk(KERN_ALERT "xennet_tx_buf_gc: warning "
                                       "-- grant still in use by backend "
                                       "domain.\n");
                                BUG();
                        }
                        gnttab_end_foreign_access_ref(
                                np->grant_tx_ref[id], GNTMAP_readonly);
                        gnttab_release_grant_reference(
                                &np->gref_tx_head, np->grant_tx_ref[id]);
                        np->grant_tx_ref[id] = GRANT_INVALID_REF;
                        add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
                        dev_kfree_skb_irq(skb);
                }

                np->tx.rsp_cons = prod;

                /*
                 * Set a new event, then check for race with update of tx_cons.
                 * Note that it is essential to schedule a callback, no matter
                 * how few buffers are pending. Even if there is space in the
                 * transmit ring, higher layers may be blocked because too much
                 * data is outstanding: in such cases notification from Xen is
                 * likely to be the only kick that we'll get.
                 */
                np->tx.sring->rsp_event =
                        prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
                mb(); /* update shared area */
        } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

        xennet_maybe_wake_tx(dev);
}

static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
                              struct xen_netif_tx_request *tx)
{
        struct netfront_info *np = netdev_priv(dev);
        char *data = skb->data;
        unsigned long mfn;
        RING_IDX prod = np->tx.req_prod_pvt;
        int frags = skb_shinfo(skb)->nr_frags;
        unsigned int offset = offset_in_page(data);
        unsigned int len = skb_headlen(skb);
        unsigned int id;
        grant_ref_t ref;
        int i;

        /* While the header overlaps a page boundary (including being
           larger than a page), split it into page-sized chunks. */
        while (len > PAGE_SIZE - offset) {
                tx->size = PAGE_SIZE - offset;
                tx->flags |= NETTXF_more_data;
                len -= tx->size;
                data += tx->size;
                offset = 0;

                id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
                np->tx_skbs[id].skb = skb_get(skb);
                tx = RING_GET_REQUEST(&np->tx, prod++);
                tx->id = id;
                ref = gnttab_claim_grant_reference(&np->gref_tx_head);
                BUG_ON((signed short)ref < 0);

                mfn = virt_to_mfn(data);
                gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
                                                mfn, GNTMAP_readonly);

                tx->gref = np->grant_tx_ref[id] = ref;
                tx->offset = offset;
                tx->size = len;
                tx->flags = 0;
        }

        /* Grant backend access to each skb fragment page. */
        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;

                tx->flags |= NETTXF_more_data;

                id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
                np->tx_skbs[id].skb = skb_get(skb);
                tx = RING_GET_REQUEST(&np->tx, prod++);
                tx->id = id;
                ref = gnttab_claim_grant_reference(&np->gref_tx_head);
                BUG_ON((signed short)ref < 0);

                mfn = pfn_to_mfn(page_to_pfn(frag->page));
                gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
                                                mfn, GNTMAP_readonly);

                tx->gref = np->grant_tx_ref[id] = ref;
                tx->offset = frag->page_offset;
                tx->size = frag->size;
                tx->flags = 0;
        }

        np->tx.req_prod_pvt = prod;
}

static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        unsigned short id;
        struct netfront_info *np = netdev_priv(dev);
        struct xen_netif_tx_request *tx;
        struct xen_netif_extra_info *extra;
        char *data = skb->data;
        RING_IDX i;
        grant_ref_t ref;
        unsigned long mfn;
        int notify;
        int frags = skb_shinfo(skb)->nr_frags;
        unsigned int offset = offset_in_page(data);
        unsigned int len = skb_headlen(skb);

        frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
        if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
                printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
                       frags);
                dump_stack();
                goto drop;
        }

        spin_lock_irq(&np->tx_lock);

        if (unlikely(!netif_carrier_ok(dev) ||
                     (frags > 1 && !xennet_can_sg(dev)) ||
                     netif_needs_gso(dev, skb))) {
                spin_unlock_irq(&np->tx_lock);
                goto drop;
        }

        i = np->tx.req_prod_pvt;

        id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
        np->tx_skbs[id].skb = skb;

        tx = RING_GET_REQUEST(&np->tx, i);

        tx->id = id;
        ref = gnttab_claim_grant_reference(&np->gref_tx_head);
        BUG_ON((signed short)ref < 0);
        mfn = virt_to_mfn(data);
        gnttab_grant_foreign_access_ref(
                ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
        tx->gref = np->grant_tx_ref[id] = ref;
        tx->offset = offset;
        tx->size = len;
        extra = NULL;

        tx->flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                /* local packet? */
                tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
        else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                /* remote but checksummed. */
                tx->flags |= NETTXF_data_validated;

        if (skb_shinfo(skb)->gso_size) {
                struct xen_netif_extra_info *gso;

                gso = (struct xen_netif_extra_info *)
                        RING_GET_REQUEST(&np->tx, ++i);

                if (extra)
                        extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
                else
                        tx->flags |= NETTXF_extra_info;

                gso->u.gso.size = skb_shinfo(skb)->gso_size;
                gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
                gso->u.gso.pad = 0;
                gso->u.gso.features = 0;

                gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
                gso->flags = 0;
                extra = gso;
        }

        np->tx.req_prod_pvt = i + 1;

        xennet_make_frags(skb, dev, tx);
        tx->size = skb->len;

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
        if (notify)
                notify_remote_via_irq(np->netdev->irq);

        np->stats.tx_bytes += skb->len;
        np->stats.tx_packets++;

        /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
        xennet_tx_buf_gc(dev);

        if (!netfront_tx_slot_available(np))
                netif_stop_queue(dev);

        spin_unlock_irq(&np->tx_lock);

        return 0;

drop:
        np->stats.tx_dropped++;
        dev_kfree_skb(skb);
        return 0;
}

static int xennet_close(struct net_device *dev)
{
        struct netfront_info *np = netdev_priv(dev);
        netif_stop_queue(np->netdev);
        napi_disable(&np->napi);
        return 0;
}

static struct net_device_stats *xennet_get_stats(struct net_device *dev)
{
        struct netfront_info *np = netdev_priv(dev);
        return &np->stats;
}

static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
                                grant_ref_t ref)
{
        int new = xennet_rxidx(np->rx.req_prod_pvt);

        BUG_ON(np->rx_skbs[new]);
        np->rx_skbs[new] = skb;
        np->grant_rx_ref[new] = ref;
        RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
        RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
        np->rx.req_prod_pvt++;
}

static int xennet_get_extras(struct netfront_info *np,
                             struct xen_netif_extra_info *extras,
                             RING_IDX rp)
{
        struct xen_netif_extra_info *extra;
        struct device *dev = &np->netdev->dev;
        RING_IDX cons = np->rx.rsp_cons;
        int err = 0;

        do {
                struct sk_buff *skb;
                grant_ref_t ref;

                if (unlikely(cons + 1 == rp)) {
                        if (net_ratelimit())
                                dev_warn(dev, "Missing extra info\n");
                        err = -EBADR;
                        break;
                }

                extra = (struct xen_netif_extra_info *)
                        RING_GET_RESPONSE(&np->rx, ++cons);

                if (unlikely(!extra->type ||
                             extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
                        if (net_ratelimit())
                                dev_warn(dev, "Invalid extra type: %d\n",
                                         extra->type);
                        err = -EINVAL;
                } else {
                        memcpy(&extras[extra->type - 1], extra,
                               sizeof(*extra));
                }

                skb = xennet_get_rx_skb(np, cons);
                ref = xennet_get_rx_ref(np, cons);
                xennet_move_rx_slot(np, skb, ref);
        } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

        np->rx.rsp_cons = cons;
        return err;
}

static int xennet_get_responses(struct netfront_info *np,
                                struct netfront_rx_info *rinfo, RING_IDX rp,
                                struct sk_buff_head *list)
{
        struct xen_netif_rx_response *rx = &rinfo->rx;
        struct xen_netif_extra_info *extras = rinfo->extras;
        struct device *dev = &np->netdev->dev;
        RING_IDX cons = np->rx.rsp_cons;
        struct sk_buff *skb = xennet_get_rx_skb(np, cons);
        grant_ref_t ref = xennet_get_rx_ref(np, cons);
        int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
        int frags = 1;
        int err = 0;
        unsigned long ret;

        if (rx->flags & NETRXF_extra_info) {
                err = xennet_get_extras(np, extras, rp);
                cons = np->rx.rsp_cons;
        }

        for (;;) {
                if (unlikely(rx->status < 0 ||
                             rx->offset + rx->status > PAGE_SIZE)) {
                        if (net_ratelimit())
                                dev_warn(dev, "rx->offset: %x, size: %u\n",
                                         rx->offset, rx->status);
                        xennet_move_rx_slot(np, skb, ref);
                        err = -EINVAL;
                        goto next;
                }

                /*
                 * This definitely indicates a bug, either in this driver or in
                 * the backend driver. In future this should flag the bad
                 * situation to the system controller to reboot the backend.
                 */
                if (ref == GRANT_INVALID_REF) {
                        if (net_ratelimit())
                                dev_warn(dev, "Bad rx response id %d.\n",
                                         rx->id);
                        err = -EINVAL;
                        goto next;
                }

                ret = gnttab_end_foreign_access_ref(ref, 0);
                BUG_ON(!ret);

                gnttab_release_grant_reference(&np->gref_rx_head, ref);

                __skb_queue_tail(list, skb);

next:
                if (!(rx->flags & NETRXF_more_data))
                        break;

                if (cons + frags == rp) {
                        if (net_ratelimit())
                                dev_warn(dev, "Need more frags\n");
                        err = -ENOENT;
                        break;
                }

                rx = RING_GET_RESPONSE(&np->rx, cons + frags);
                skb = xennet_get_rx_skb(np, cons + frags);
                ref = xennet_get_rx_ref(np, cons + frags);
                frags++;
        }

        if (unlikely(frags > max)) {
                if (net_ratelimit())
                        dev_warn(dev, "Too many frags\n");
                err = -E2BIG;
        }

        if (unlikely(err))
                np->rx.rsp_cons = cons + frags;

        return err;
}

static int xennet_set_skb_gso(struct sk_buff *skb,
                              struct xen_netif_extra_info *gso)
{
        if (!gso->u.gso.size) {
                if (net_ratelimit())
                        printk(KERN_WARNING "GSO size must not be zero.\n");
                return -EINVAL;
        }

        /* Currently only TCPv4 segmentation offload is supported. */
        if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
                if (net_ratelimit())
                        printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
                return -EINVAL;
        }

        skb_shinfo(skb)->gso_size = gso->u.gso.size;
        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

        /* Header must be checked, and gso_segs computed. */
        skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
        skb_shinfo(skb)->gso_segs = 0;

        return 0;
}

static RING_IDX xennet_fill_frags(struct netfront_info *np,
                                  struct sk_buff *skb,
                                  struct sk_buff_head *list)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int nr_frags = shinfo->nr_frags;
        RING_IDX cons = np->rx.rsp_cons;
        skb_frag_t *frag = shinfo->frags + nr_frags;
        struct sk_buff *nskb;

        while ((nskb = __skb_dequeue(list))) {
                struct xen_netif_rx_response *rx =
                        RING_GET_RESPONSE(&np->rx, ++cons);

                frag->page = skb_shinfo(nskb)->frags[0].page;
                frag->page_offset = rx->offset;
                frag->size = rx->status;

                skb->data_len += rx->status;

                skb_shinfo(nskb)->nr_frags = 0;
                kfree_skb(nskb);

                frag++;
                nr_frags++;
        }

        shinfo->nr_frags = nr_frags;
        return cons;
}

static int skb_checksum_setup(struct sk_buff *skb)
{
        struct iphdr *iph;
        unsigned char *th;
        int err = -EPROTO;

        if (skb->protocol != htons(ETH_P_IP))
                goto out;

        iph = (void *)skb->data;
        th = skb->data + 4 * iph->ihl;
        if (th >= skb_tail_pointer(skb))
                goto out;

        skb->csum_start = th - skb->head;
        switch (iph->protocol) {
        case IPPROTO_TCP:
                skb->csum_offset = offsetof(struct tcphdr, check);
                break;
        case IPPROTO_UDP:
                skb->csum_offset = offsetof(struct udphdr, check);
                break;
        default:
                if (net_ratelimit())
                        printk(KERN_ERR "Attempting to checksum a non-"
                               "TCP/UDP packet, dropping a protocol"
                               " %d packet", iph->protocol);
                goto out;
        }

        if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
                goto out;

        err = 0;

out:
        return err;
}

static int handle_incoming_queue(struct net_device *dev,
                                 struct sk_buff_head *rxq)
{
        struct netfront_info *np = netdev_priv(dev);
        int packets_dropped = 0;
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(rxq)) != NULL) {
                struct page *page = NETFRONT_SKB_CB(skb)->page;
                void *vaddr = page_address(page);
                unsigned offset = NETFRONT_SKB_CB(skb)->offset;

                memcpy(skb->data, vaddr + offset,
                       skb_headlen(skb));

                if (page != skb_shinfo(skb)->frags[0].page)
                        __free_page(page);

                /* Ethernet work: Delayed to here as it peeks the header. */
                skb->protocol = eth_type_trans(skb, dev);

                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        if (skb_checksum_setup(skb)) {
                                kfree_skb(skb);
                                packets_dropped++;
                                np->stats.rx_errors++;
                                continue;
                        }
                }

                np->stats.rx_packets++;
                np->stats.rx_bytes += skb->len;

                /* Pass it up. */
                netif_receive_skb(skb);
                dev->last_rx = jiffies;
        }

        return packets_dropped;
}

static int xennet_poll(struct napi_struct *napi, int budget)
{
        struct netfront_info *np = container_of(napi, struct netfront_info, napi);
        struct net_device *dev = np->netdev;
        struct sk_buff *skb;
        struct netfront_rx_info rinfo;
        struct xen_netif_rx_response *rx = &rinfo.rx;
        struct xen_netif_extra_info *extras = rinfo.extras;
        RING_IDX i, rp;
        int work_done;
        struct sk_buff_head rxq;
        struct sk_buff_head errq;
        struct sk_buff_head tmpq;
        unsigned long flags;
        unsigned int len;
        int err;

        spin_lock(&np->rx_lock);

        if (unlikely(!netif_carrier_ok(dev))) {
                spin_unlock(&np->rx_lock);
                return 0;
        }

        skb_queue_head_init(&rxq);
        skb_queue_head_init(&errq);
        skb_queue_head_init(&tmpq);

        rp = np->rx.sring->rsp_prod;
        rmb(); /* Ensure we see queued responses up to 'rp'. */

        i = np->rx.rsp_cons;
        work_done = 0;
        while ((i != rp) && (work_done < budget)) {
                memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
                memset(extras, 0, sizeof(rinfo.extras));

                err = xennet_get_responses(np, &rinfo, rp, &tmpq);

                if (unlikely(err)) {
err:
                        while ((skb = __skb_dequeue(&tmpq)))
                                __skb_queue_tail(&errq, skb);
                        np->stats.rx_errors++;
                        i = np->rx.rsp_cons;
                        continue;
                }

                skb = __skb_dequeue(&tmpq);

                if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
                        struct xen_netif_extra_info *gso;
                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

                        if (unlikely(xennet_set_skb_gso(skb, gso))) {
                                __skb_queue_head(&tmpq, skb);
                                np->rx.rsp_cons += skb_queue_len(&tmpq);
                                goto err;
                        }
                }

                NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
                NETFRONT_SKB_CB(skb)->offset = rx->offset;

                len = rx->status;
                if (len > RX_COPY_THRESHOLD)
                        len = RX_COPY_THRESHOLD;
                skb_put(skb, len);

                if (rx->status > len) {
                        skb_shinfo(skb)->frags[0].page_offset =
                                rx->offset + len;
                        skb_shinfo(skb)->frags[0].size = rx->status - len;
                        skb->data_len = rx->status - len;
                } else {
                        skb_shinfo(skb)->frags[0].page = NULL;
                        skb_shinfo(skb)->nr_frags = 0;
                }

                i = xennet_fill_frags(np, skb, &tmpq);

                /*
                 * Truesize approximates the size of true data plus
                 * any supervisor overheads. Adding hypervisor
                 * overheads has been shown to significantly reduce
                 * achievable bandwidth with the default receive
                 * buffer size. It is therefore not wise to account
                 * for it here.
                 *
                 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
                 * to RX_COPY_THRESHOLD + the supervisor
                 * overheads. Here, we add the size of the data pulled
                 * in xennet_fill_frags().
                 *
                 * We also adjust for any unused space in the main
                 * data area by subtracting (RX_COPY_THRESHOLD -
                 * len). This is especially important with drivers
                 * which split incoming packets into header and data,
                 * using only 66 bytes of the main data area (see the
                 * e1000 driver for example.) On such systems,
                 * without this last adjustment, our achievable
                 * receive throughput using the standard receive
                 * buffer size was cut by 25%(!!!).
                 */
                skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
                skb->len += skb->data_len;

                if (rx->flags & NETRXF_csum_blank)
                        skb->ip_summed = CHECKSUM_PARTIAL;
                else if (rx->flags & NETRXF_data_validated)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                __skb_queue_tail(&rxq, skb);

                np->rx.rsp_cons = ++i;
                work_done++;
        }

        while ((skb = __skb_dequeue(&errq)))
                kfree_skb(skb);

        work_done -= handle_incoming_queue(dev, &rxq);

        /* If we get a callback with very few responses, reduce fill target. */
        /* NB. Note exponential increase, linear decrease. */
        if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
             ((3*np->rx_target) / 4)) &&
            (--np->rx_target < np->rx_min_target))
                np->rx_target = np->rx_min_target;

        xennet_alloc_rx_buffers(dev);

        if (work_done < budget) {
                int more_to_do = 0;

                local_irq_save(flags);

                RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
                if (!more_to_do)
                        __netif_rx_complete(dev, napi);

                local_irq_restore(flags);
        }

        spin_unlock(&np->rx_lock);

        return work_done;
}

static int xennet_change_mtu(struct net_device *dev, int mtu)
{
        int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;

        if (mtu > max)
                return -EINVAL;
        dev->mtu = mtu;
        return 0;
}

static void xennet_release_tx_bufs(struct netfront_info *np)
{
        struct sk_buff *skb;
        int i;

        for (i = 0; i < NET_TX_RING_SIZE; i++) {
                /* Skip over entries which are actually freelist references */
                if ((unsigned long)np->tx_skbs[i].skb < PAGE_OFFSET)
                        continue;

                skb = np->tx_skbs[i].skb;
                gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
                                              GNTMAP_readonly);
                gnttab_release_grant_reference(&np->gref_tx_head,
                                               np->grant_tx_ref[i]);
                np->grant_tx_ref[i] = GRANT_INVALID_REF;
                add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
                dev_kfree_skb_irq(skb);
        }
}

static void xennet_release_rx_bufs(struct netfront_info *np)
{
        struct mmu_update *mmu = np->rx_mmu;
        struct multicall_entry *mcl = np->rx_mcl;
        struct sk_buff_head free_list;
        struct sk_buff *skb;
        unsigned long mfn;
        int xfer = 0, noxfer = 0, unused = 0;
        int id, ref;

        dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
                 __func__);
        return;

        skb_queue_head_init(&free_list);

        spin_lock_bh(&np->rx_lock);

        for (id = 0; id < NET_RX_RING_SIZE; id++) {
                ref = np->grant_rx_ref[id];
                if (ref == GRANT_INVALID_REF) {
                        unused++;
                        continue;
                }

                skb = np->rx_skbs[id];
                mfn = gnttab_end_foreign_transfer_ref(ref);
                gnttab_release_grant_reference(&np->gref_rx_head, ref);
                np->grant_rx_ref[id] = GRANT_INVALID_REF;

                if (0 == mfn) {
                        skb_shinfo(skb)->nr_frags = 0;
                        dev_kfree_skb(skb);
                        noxfer++;
                        continue;
                }

                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        /* Remap the page. */
                        struct page *page = skb_shinfo(skb)->frags[0].page;
                        unsigned long pfn = page_to_pfn(page);
                        void *vaddr = page_address(page);

                        MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
                                                mfn_pte(mfn, PAGE_KERNEL),
                                                0);
                        mcl++;
                        mmu->ptr = ((u64)mfn << PAGE_SHIFT)
                                | MMU_MACHPHYS_UPDATE;
                        mmu->val = pfn;
                        mmu++;

                        set_phys_to_machine(pfn, mfn);
                }
                __skb_queue_tail(&free_list, skb);
                xfer++;
        }

        dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
                 __func__, xfer, noxfer, unused);

        if (xfer) {
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        /* Do all the remapping work and M2P updates. */
                        MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
                                         0, DOMID_SELF);
                        mcl++;
                        HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
                }
        }

        while ((skb = __skb_dequeue(&free_list)) != NULL)
                dev_kfree_skb(skb);

        spin_unlock_bh(&np->rx_lock);
}

static void xennet_uninit(struct net_device *dev)
{
        struct netfront_info *np = netdev_priv(dev);
        xennet_release_tx_bufs(np);
        xennet_release_rx_bufs(np);
        gnttab_free_grant_references(np->gref_tx_head);
        gnttab_free_grant_references(np->gref_rx_head);
}

static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
{
        int i, err;
        struct net_device *netdev;
        struct netfront_info *np;

        netdev = alloc_etherdev(sizeof(struct netfront_info));
        if (!netdev) {
                printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
                       __func__);
                return ERR_PTR(-ENOMEM);
        }

        np = netdev_priv(netdev);
        np->xbdev = dev;

        spin_lock_init(&np->tx_lock);
        spin_lock_init(&np->rx_lock);

        skb_queue_head_init(&np->rx_batch);
        np->rx_target = RX_DFL_MIN_TARGET;
        np->rx_min_target = RX_DFL_MIN_TARGET;
        np->rx_max_target = RX_MAX_TARGET;

        init_timer(&np->rx_refill_timer);
        np->rx_refill_timer.data = (unsigned long)netdev;
        np->rx_refill_timer.function = rx_refill_timeout;

        /* Initialise tx_skbs as a free chain containing every entry. */
        np->tx_skb_freelist = 0;
        for (i = 0; i < NET_TX_RING_SIZE; i++) {
                np->tx_skbs[i].link = i+1;
                np->grant_tx_ref[i] = GRANT_INVALID_REF;
        }

        /* Clear out rx_skbs */
        for (i = 0; i < NET_RX_RING_SIZE; i++) {
                np->rx_skbs[i] = NULL;
                np->grant_rx_ref[i] = GRANT_INVALID_REF;
        }

        /* A grant for every tx ring slot */
        if (gnttab_alloc_grant_references(TX_MAX_TARGET,
                                          &np->gref_tx_head) < 0) {
                printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
                err = -ENOMEM;
                goto exit;
        }
        /* A grant for every rx ring slot */
        if (gnttab_alloc_grant_references(RX_MAX_TARGET,
                                          &np->gref_rx_head) < 0) {
                printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
                err = -ENOMEM;
                goto exit_free_tx;
        }

        netdev->open = xennet_open;
        netdev->hard_start_xmit = xennet_start_xmit;
        netdev->stop = xennet_close;
        netdev->get_stats = xennet_get_stats;
        netif_napi_add(netdev, &np->napi, xennet_poll, 64);
        netdev->uninit = xennet_uninit;
        netdev->change_mtu = xennet_change_mtu;
        netdev->features = NETIF_F_IP_CSUM;

        SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
        SET_NETDEV_DEV(netdev, &dev->dev);

        np->netdev = netdev;

        netif_carrier_off(netdev);

        return netdev;

 exit_free_tx:
        gnttab_free_grant_references(np->gref_tx_head);
 exit:
        free_netdev(netdev);
        return ERR_PTR(err);
}

/**
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int __devinit netfront_probe(struct xenbus_device *dev,
                                    const struct xenbus_device_id *id)
{
        int err;
        struct net_device *netdev;
        struct netfront_info *info;

        netdev = xennet_create_dev(dev);
        if (IS_ERR(netdev)) {
                err = PTR_ERR(netdev);
                xenbus_dev_fatal(dev, err, "creating netdev");
                return err;
        }

        info = netdev_priv(netdev);
        dev->dev.driver_data = info;

        err = register_netdev(info->netdev);
        if (err) {
                printk(KERN_WARNING "%s: register_netdev err=%d\n",
                       __func__, err);
                goto fail;
        }

        err = xennet_sysfs_addif(info->netdev);
        if (err) {
                unregister_netdev(info->netdev);
                printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
                       __func__, err);
                goto fail;
        }

        return 0;

 fail:
        free_netdev(netdev);
        dev->dev.driver_data = NULL;
        return err;
}

static void xennet_end_access(int ref, void *page)
{
        /* This frees the page as a side-effect */
        if (ref != GRANT_INVALID_REF)
                gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}

static void xennet_disconnect_backend(struct netfront_info *info)
{
        /* Stop old i/f to prevent errors whilst we rebuild the state. */
        spin_lock_bh(&info->rx_lock);
        spin_lock_irq(&info->tx_lock);
        netif_carrier_off(info->netdev);
        spin_unlock_irq(&info->tx_lock);
        spin_unlock_bh(&info->rx_lock);

        if (info->netdev->irq)
                unbind_from_irqhandler(info->netdev->irq, info->netdev);
        info->evtchn = info->netdev->irq = 0;

        /* End access and free the pages */
        xennet_end_access(info->tx_ring_ref, info->tx.sring);
        xennet_end_access(info->rx_ring_ref, info->rx.sring);

        info->tx_ring_ref = GRANT_INVALID_REF;
        info->rx_ring_ref = GRANT_INVALID_REF;
        info->tx.sring = NULL;
        info->rx.sring = NULL;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart. We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
        struct netfront_info *info = dev->dev.driver_data;

        dev_dbg(&dev->dev, "%s\n", dev->nodename);

        xennet_disconnect_backend(info);
        return 0;
}

static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
        char *s, *e, *macstr;
        int i;

        macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
        if (IS_ERR(macstr))
                return PTR_ERR(macstr);

        for (i = 0; i < ETH_ALEN; i++) {
                mac[i] = simple_strtoul(s, &e, 16);
                if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
                        kfree(macstr);
                        return -ENOENT;
                }
                s = e+1;
        }

        kfree(macstr);
        return 0;
}

static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct netfront_info *np = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&np->tx_lock, flags);

        if (likely(netif_carrier_ok(dev))) {
                xennet_tx_buf_gc(dev);
                /* Under tx_lock: protects access to rx shared-ring indexes. */
                if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
                        netif_rx_schedule(dev, &np->napi);
        }

        spin_unlock_irqrestore(&np->tx_lock, flags);

        return IRQ_HANDLED;
}

static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
{
        struct xen_netif_tx_sring *txs;
        struct xen_netif_rx_sring *rxs;
        int err;
        struct net_device *netdev = info->netdev;

        info->tx_ring_ref = GRANT_INVALID_REF;
        info->rx_ring_ref = GRANT_INVALID_REF;
        info->rx.sring = NULL;
        info->tx.sring = NULL;
        netdev->irq = 0;

        err = xen_net_read_mac(dev, netdev->dev_addr);
        if (err) {
                xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
                goto fail;
        }

        txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_KERNEL);
        if (!txs) {
                err = -ENOMEM;
                xenbus_dev_fatal(dev, err, "allocating tx ring page");
                goto fail;
        }
        SHARED_RING_INIT(txs);
        FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);

        err = xenbus_grant_ring(dev, virt_to_mfn(txs));
        if (err < 0) {
                free_page((unsigned long)txs);
                goto fail;
        }

        info->tx_ring_ref = err;
        rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_KERNEL);
        if (!rxs) {
                err = -ENOMEM;
                xenbus_dev_fatal(dev, err, "allocating rx ring page");
                goto fail;
        }
        SHARED_RING_INIT(rxs);
        FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

        err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
        if (err < 0) {
                free_page((unsigned long)rxs);
                goto fail;
        }
        info->rx_ring_ref = err;

        err = xenbus_alloc_evtchn(dev, &info->evtchn);
        if (err)
                goto fail;

        err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
                                        IRQF_SAMPLE_RANDOM, netdev->name,
                                        netdev);
        if (err < 0)
                goto fail;
        netdev->irq = err;
        return 0;

 fail:
        return err;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
                           struct netfront_info *info)
{
        const char *message;
        struct xenbus_transaction xbt;
        int err;

        /* Create shared ring, alloc event channel. */
        err = setup_netfront(dev, info);
        if (err)
                goto out;

again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                xenbus_dev_fatal(dev, err, "starting transaction");
                goto destroy_ring;
        }

        err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
                            info->tx_ring_ref);
        if (err) {
                message = "writing tx ring-ref";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
                            info->rx_ring_ref);
        if (err) {
                message = "writing rx ring-ref";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, dev->nodename,
                            "event-channel", "%u", info->evtchn);
        if (err) {
                message = "writing event-channel";
                goto abort_transaction;
        }

        err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
                            1);
        if (err) {
                message = "writing request-rx-copy";
                goto abort_transaction;
        }

        err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
        if (err) {
                message = "writing feature-rx-notify";
                goto abort_transaction;
        }

        err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
        if (err) {
                message = "writing feature-sg";
                goto abort_transaction;
        }

        err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
        if (err) {
                message = "writing feature-gso-tcpv4";
                goto abort_transaction;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err) {
                if (err == -EAGAIN)
                        goto again;
                xenbus_dev_fatal(dev, err, "completing transaction");
                goto destroy_ring;
        }

        return 0;

 abort_transaction:
        xenbus_transaction_end(xbt, 1);
        xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
        xennet_disconnect_backend(info);
 out:
        return err;
}

static int xennet_set_sg(struct net_device *dev, u32 data)
{
        if (data) {
                struct netfront_info *np = netdev_priv(dev);
                int val;

                if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
                                 "%d", &val) < 0)
                        val = 0;
                if (!val)
                        return -ENOSYS;
        } else if (dev->mtu > ETH_DATA_LEN)
                dev->mtu = ETH_DATA_LEN;

        return ethtool_op_set_sg(dev, data);
}

static int xennet_set_tso(struct net_device *dev, u32 data)
{
        if (data) {
                struct netfront_info *np = netdev_priv(dev);
                int val;

                if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
                                 "feature-gso-tcpv4", "%d", &val) < 0)
                        val = 0;
                if (!val)
                        return -ENOSYS;
        }

        return ethtool_op_set_tso(dev, data);
}

static void xennet_set_features(struct net_device *dev)
{
        /* Turn off all GSO bits except ROBUST. */
        dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
        dev->features |= NETIF_F_GSO_ROBUST;
        xennet_set_sg(dev, 0);

        /* We need checksum offload to enable scatter/gather and TSO. */
        if (!(dev->features & NETIF_F_IP_CSUM))
                return;

        if (!xennet_set_sg(dev, 1))
                xennet_set_tso(dev, 1);
}

static int xennet_connect(struct net_device *dev)
{
        struct netfront_info *np = netdev_priv(dev);
        int i, requeue_idx, err;
        struct sk_buff *skb;
        grant_ref_t ref;
        struct xen_netif_rx_request *req;
        unsigned int feature_rx_copy;

        err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
                           "feature-rx-copy", "%u", &feature_rx_copy);
        if (err != 1)
                feature_rx_copy = 0;

        if (!feature_rx_copy) {
                dev_info(&dev->dev,
                         "backend does not support copying receive path");
                return -ENODEV;
        }

        err = talk_to_backend(np->xbdev, np);
        if (err)
                return err;

        xennet_set_features(dev);

        spin_lock_bh(&np->rx_lock);
        spin_lock_irq(&np->tx_lock);

        /* Step 1: Discard all pending TX packet fragments. */
        xennet_release_tx_bufs(np);

        /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
        for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
                if (!np->rx_skbs[i])
                        continue;

                skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
                ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
                req = RING_GET_REQUEST(&np->rx, requeue_idx);

                gnttab_grant_foreign_access_ref(
                        ref, np->xbdev->otherend_id,
                        pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->frags->page)),
                        0);
                req->gref = ref;
                req->id = requeue_idx;

                requeue_idx++;
        }

        np->rx.req_prod_pvt = requeue_idx;

        /*
         * Step 3: All public and private state should now be sane. Get
         * ready to start sending and receiving packets and give the driver
         * domain a kick because we've probably just requeued some
         * packets.
         */
        netif_carrier_on(np->netdev);
        notify_remote_via_irq(np->netdev->irq);
        xennet_tx_buf_gc(dev);
        xennet_alloc_rx_buffers(dev);

        spin_unlock_irq(&np->tx_lock);
        spin_unlock_bh(&np->rx_lock);

        return 0;
}

/**
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
                            enum xenbus_state backend_state)
{
        struct netfront_info *np = dev->dev.driver_data;
        struct net_device *netdev = np->netdev;

        dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

        switch (backend_state) {
        case XenbusStateInitialising:
        case XenbusStateInitialised:
        case XenbusStateConnected:
        case XenbusStateUnknown:
        case XenbusStateClosed:
                break;

        case XenbusStateInitWait:
                if (dev->state != XenbusStateInitialising)
                        break;
                if (xennet_connect(netdev) != 0)
                        break;
                xenbus_switch_state(dev, XenbusStateConnected);
                break;

        case XenbusStateClosing:
                xenbus_frontend_closed(dev);
                break;
        }
}

static struct ethtool_ops xennet_ethtool_ops =
{
        .get_tx_csum = ethtool_op_get_tx_csum,
        .set_tx_csum = ethtool_op_set_tx_csum,
        .get_sg = ethtool_op_get_sg,
        .set_sg = xennet_set_sg,
        .get_tso = ethtool_op_get_tso,
        .set_tso = xennet_set_tso,
        .get_link = ethtool_op_get_link,
};

#ifdef CONFIG_SYSFS
static ssize_t show_rxbuf_min(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        struct netfront_info *info = netdev_priv(netdev);

        return sprintf(buf, "%u\n", info->rx_min_target);
}

static ssize_t store_rxbuf_min(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t len)
{
        struct net_device *netdev = to_net_dev(dev);
        struct netfront_info *np = netdev_priv(netdev);
        char *endp;
        unsigned long target;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        target = simple_strtoul(buf, &endp, 0);
        if (endp == buf)
                return -EBADMSG;

        if (target < RX_MIN_TARGET)
                target = RX_MIN_TARGET;
        if (target > RX_MAX_TARGET)
                target = RX_MAX_TARGET;

        spin_lock_bh(&np->rx_lock);
        if (target > np->rx_max_target)
                np->rx_max_target = target;
        np->rx_min_target = target;
        if (target > np->rx_target)
                np->rx_target = target;

        xennet_alloc_rx_buffers(netdev);

        spin_unlock_bh(&np->rx_lock);
        return len;
}

static ssize_t show_rxbuf_max(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        struct netfront_info *info = netdev_priv(netdev);

        return sprintf(buf, "%u\n", info->rx_max_target);
}

static ssize_t store_rxbuf_max(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t len)
{
        struct net_device *netdev = to_net_dev(dev);
        struct netfront_info *np = netdev_priv(netdev);
        char *endp;
        unsigned long target;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        target = simple_strtoul(buf, &endp, 0);
        if (endp == buf)
                return -EBADMSG;

        if (target < RX_MIN_TARGET)
                target = RX_MIN_TARGET;
        if (target > RX_MAX_TARGET)
                target = RX_MAX_TARGET;

        spin_lock_bh(&np->rx_lock);
        if (target < np->rx_min_target)
                np->rx_min_target = target;
        np->rx_max_target = target;
        if (target < np->rx_target)
                np->rx_target = target;

        xennet_alloc_rx_buffers(netdev);

        spin_unlock_bh(&np->rx_lock);
        return len;
}

static ssize_t show_rxbuf_cur(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        struct netfront_info *info = netdev_priv(netdev);

        return sprintf(buf, "%u\n", info->rx_target);
}

static struct device_attribute xennet_attrs[] = {
        __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
        __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
        __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
};

static int xennet_sysfs_addif(struct net_device *netdev)
{
        int i;
        int err;

        for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
                err = device_create_file(&netdev->dev,
                                         &xennet_attrs[i]);
                if (err)
                        goto fail;
        }
        return 0;

 fail:
        while (--i >= 0)
                device_remove_file(&netdev->dev, &xennet_attrs[i]);
        return err;
}

static void xennet_sysfs_delif(struct net_device *netdev)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
                device_remove_file(&netdev->dev, &xennet_attrs[i]);
}

#endif /* CONFIG_SYSFS */

static struct xenbus_device_id netfront_ids[] = {
        { "vif" },
        { "" }
};

static int __devexit xennet_remove(struct xenbus_device *dev)
{
        struct netfront_info *info = dev->dev.driver_data;

        dev_dbg(&dev->dev, "%s\n", dev->nodename);

        unregister_netdev(info->netdev);

        xennet_disconnect_backend(info);

        del_timer_sync(&info->rx_refill_timer);

        xennet_sysfs_delif(info->netdev);

        free_netdev(info->netdev);

        return 0;
}

static struct xenbus_driver netfront = {
        .name = "vif",
        .owner = THIS_MODULE,
        .ids = netfront_ids,
        .probe = netfront_probe,
        .remove = __devexit_p(xennet_remove),
        .resume = netfront_resume,
        .otherend_changed = backend_changed,
};

static int __init netif_init(void)
{
        if (!is_running_on_xen())
                return -ENODEV;

        if (is_initial_xendomain())
                return 0;

        printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");

        return xenbus_register_frontend(&netfront);
}
module_init(netif_init);

static void __exit netif_exit(void)
{
        if (is_initial_xendomain())
                return;

        xenbus_unregister_driver(&netfront);
}
module_exit(netif_exit);

MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");