linux-2.6-block.git: drivers/net/xen-netback/xenbus.c
/*
 * Xenbus code for netif backend
 *
 * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "common.h"
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>

struct backend_info {
        struct xenbus_device *dev;
        struct xenvif *vif;

        /* This is the state that will be reflected in xenstore when any
         * active hotplug script completes.
         */
        enum xenbus_state state;

        enum xenbus_state frontend_state;
        struct xenbus_watch hotplug_status_watch;
        u8 have_hotplug_status_watch:1;
};

static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
static void connect(struct backend_info *be);
static int read_xenbus_vif_flags(struct backend_info *be);
static int backend_create_xenvif(struct backend_info *be);
static void unregister_hotplug_status_watch(struct backend_info *be);
static void xen_unregister_watchers(struct xenvif *vif);
static void set_backend_state(struct backend_info *be,
                              enum xenbus_state state);

#ifdef CONFIG_DEBUG_FS
struct dentry *xen_netback_dbg_root = NULL;

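/* seq_file show handler for the per-queue "io_ring_qN" debugfs file: dumps
 * the shared TX/RX ring indices, pending/dealloc counters, NAPI and credit
 * state for one queue.
 */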
static int xenvif_read_io_ring(struct seq_file *m, void *v)
{
        struct xenvif_queue *queue = m->private;
        struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
        struct xen_netif_rx_back_ring *rx_ring = &queue->rx;
        struct netdev_queue *dev_queue;

        if (tx_ring->sring) {
                struct xen_netif_tx_sring *sring = tx_ring->sring;

                seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id,
                           tx_ring->nr_ents);
                seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
                           sring->req_prod,
                           sring->req_prod - sring->rsp_prod,
                           tx_ring->req_cons,
                           tx_ring->req_cons - sring->rsp_prod,
                           sring->req_event,
                           sring->req_event - sring->rsp_prod);
                seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n",
                           sring->rsp_prod,
                           tx_ring->rsp_prod_pvt,
                           tx_ring->rsp_prod_pvt - sring->rsp_prod,
                           sring->rsp_event,
                           sring->rsp_event - sring->rsp_prod);
                seq_printf(m, "pending prod %u pending cons %u nr_pending_reqs %u\n",
                           queue->pending_prod,
                           queue->pending_cons,
                           nr_pending_reqs(queue));
                seq_printf(m, "dealloc prod %u dealloc cons %u dealloc_queue %u\n\n",
                           queue->dealloc_prod,
                           queue->dealloc_cons,
                           queue->dealloc_prod - queue->dealloc_cons);
        }

        if (rx_ring->sring) {
                struct xen_netif_rx_sring *sring = rx_ring->sring;

                seq_printf(m, "RX: nr_ents %u\n", rx_ring->nr_ents);
                seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
                           sring->req_prod,
                           sring->req_prod - sring->rsp_prod,
                           rx_ring->req_cons,
                           rx_ring->req_cons - sring->rsp_prod,
                           sring->req_event,
                           sring->req_event - sring->rsp_prod);
                seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n\n",
                           sring->rsp_prod,
                           rx_ring->rsp_prod_pvt,
                           rx_ring->rsp_prod_pvt - sring->rsp_prod,
                           sring->rsp_event,
                           sring->rsp_event - sring->rsp_prod);
        }

        seq_printf(m, "NAPI state: %lx NAPI weight: %d TX queue len %u\n"
                   "Credit timer_pending: %d, credit: %lu, usec: %lu\n"
                   "remaining: %lu, expires: %lu, now: %lu\n",
                   queue->napi.state, queue->napi.weight,
                   skb_queue_len(&queue->tx_queue),
                   timer_pending(&queue->credit_timeout),
                   queue->credit_bytes,
                   queue->credit_usec,
                   queue->remaining_credit,
                   queue->credit_timeout.expires,
                   jiffies);

        dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id);

        seq_printf(m, "\nRx internal queue: len %u max %u pkts %u %s\n",
                   queue->rx_queue_len, queue->rx_queue_max,
                   skb_queue_len(&queue->rx_queue),
                   netif_tx_queue_stopped(dev_queue) ? "stopped" : "running");

        return 0;
}

#define XENVIF_KICK_STR "kick"
#define BUFFER_SIZE     32

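/* Write handler for the debugfs file: the only recognised command is
 * "kick", which fakes an interrupt on the queue to restart processing.
 */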
static ssize_t
xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
                     loff_t *ppos)
{
        struct xenvif_queue *queue =
                ((struct seq_file *)filp->private_data)->private;
        int len;
        char write[BUFFER_SIZE];

        /* don't allow partial writes and check the length */
        if (*ppos != 0)
                return 0;
        if (count >= sizeof(write))
                return -ENOSPC;

        len = simple_write_to_buffer(write,
                                     sizeof(write) - 1,
                                     ppos,
                                     buf,
                                     count);
        if (len < 0)
                return len;

        write[len] = '\0';

        if (!strncmp(write, XENVIF_KICK_STR, sizeof(XENVIF_KICK_STR) - 1))
                xenvif_interrupt(0, (void *)queue);
        else {
                pr_warn("Unknown command to io_ring_q%d. Available: kick\n",
                        queue->id);
                count = -EINVAL;
        }
        return count;
}

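/* Open the debugfs file in single_open() mode and allow writes so the
 * "kick" command above can be accepted.
 */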
static int xenvif_dump_open(struct inode *inode, struct file *filp)
{
        int ret;
        void *queue = NULL;

        if (inode->i_private)
                queue = inode->i_private;
        ret = single_open(filp, xenvif_read_io_ring, queue);
        filp->f_mode |= FMODE_PWRITE;
        return ret;
}

static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
        .owner = THIS_MODULE,
        .open = xenvif_dump_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = xenvif_write_io_ring,
};

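/* Create a per-vif debugfs directory containing one io_ring_qN file for
 * each queue. Failures are only logged; debugfs support is best effort.
 */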
static void xenvif_debugfs_addif(struct xenvif *vif)
{
        struct dentry *pfile;
        int i;

        if (IS_ERR_OR_NULL(xen_netback_dbg_root))
                return;

        vif->xenvif_dbg_root = debugfs_create_dir(vif->dev->name,
                                                  xen_netback_dbg_root);
        if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root)) {
                for (i = 0; i < vif->num_queues; ++i) {
                        char filename[sizeof("io_ring_q") + 4];

                        snprintf(filename, sizeof(filename), "io_ring_q%d", i);
                        pfile = debugfs_create_file(filename,
                                                    S_IRUSR | S_IWUSR,
                                                    vif->xenvif_dbg_root,
                                                    &vif->queues[i],
                                                    &xenvif_dbg_io_ring_ops_fops);
                        if (IS_ERR_OR_NULL(pfile))
                                pr_warn("Creation of io_ring file returned %ld!\n",
                                        PTR_ERR(pfile));
                }
        } else
                netdev_warn(vif->dev,
                            "Creation of vif debugfs dir returned %ld!\n",
                            PTR_ERR(vif->xenvif_dbg_root));
}

static void xenvif_debugfs_delif(struct xenvif *vif)
{
        if (IS_ERR_OR_NULL(xen_netback_dbg_root))
                return;

        if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root))
                debugfs_remove_recursive(vif->xenvif_dbg_root);
        vif->xenvif_dbg_root = NULL;
}
#endif /* CONFIG_DEBUG_FS */

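/* Tear down the backend: drive the state machine to Closed, drop any
 * watches, signal KOBJ_OFFLINE, and free the vif and backend_info.
 */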
static int netback_remove(struct xenbus_device *dev)
{
        struct backend_info *be = dev_get_drvdata(&dev->dev);

        set_backend_state(be, XenbusStateClosed);

        unregister_hotplug_status_watch(be);
        if (be->vif) {
                kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
                xen_unregister_watchers(be->vif);
                xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
                xenvif_free(be->vif);
                be->vif = NULL;
        }
        kfree(be);
        dev_set_drvdata(&dev->dev, NULL);
        return 0;
}


/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and switch to InitWait.
 */
static int netback_probe(struct xenbus_device *dev,
                         const struct xenbus_device_id *id)
{
        const char *message;
        struct xenbus_transaction xbt;
        int err;
        int sg;
        struct backend_info *be = kzalloc(sizeof(struct backend_info),
                                          GFP_KERNEL);
        if (!be) {
                xenbus_dev_fatal(dev, -ENOMEM,
                                 "allocating backend structure");
                return -ENOMEM;
        }

        be->dev = dev;
        dev_set_drvdata(&dev->dev, be);

        sg = 1;

        do {
                err = xenbus_transaction_start(&xbt);
                if (err) {
                        xenbus_dev_fatal(dev, err, "starting transaction");
                        goto fail;
                }

                err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
                if (err) {
                        message = "writing feature-sg";
                        goto abort_transaction;
                }

                err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
                                    "%d", sg);
                if (err) {
                        message = "writing feature-gso-tcpv4";
                        goto abort_transaction;
                }

                err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
                                    "%d", sg);
                if (err) {
                        message = "writing feature-gso-tcpv6";
                        goto abort_transaction;
                }

                /* We support partial checksum setup for IPv6 packets */
                err = xenbus_printf(xbt, dev->nodename,
                                    "feature-ipv6-csum-offload",
                                    "%d", 1);
                if (err) {
                        message = "writing feature-ipv6-csum-offload";
                        goto abort_transaction;
                }

                /* We support rx-copy path. */
                err = xenbus_printf(xbt, dev->nodename,
                                    "feature-rx-copy", "%d", 1);
                if (err) {
                        message = "writing feature-rx-copy";
                        goto abort_transaction;
                }

                /*
                 * We don't support rx-flip path (except old guests who don't
                 * grok this feature flag).
                 */
                err = xenbus_printf(xbt, dev->nodename,
                                    "feature-rx-flip", "%d", 0);
                if (err) {
                        message = "writing feature-rx-flip";
                        goto abort_transaction;
                }

                err = xenbus_transaction_end(xbt, 0);
        } while (err == -EAGAIN);

        if (err) {
                xenbus_dev_fatal(dev, err, "completing transaction");
                goto fail;
        }

        /*
         * Split event channels support, this is optional so it is not
         * put inside the above loop.
         */
        err = xenbus_printf(XBT_NIL, dev->nodename,
                            "feature-split-event-channels",
                            "%u", separate_tx_rx_irq);
        if (err)
                pr_debug("Error writing feature-split-event-channels\n");

        /* Multi-queue support: This is an optional feature. */
        err = xenbus_printf(XBT_NIL, dev->nodename,
                            "multi-queue-max-queues", "%u", xenvif_max_queues);
        if (err)
                pr_debug("Error writing multi-queue-max-queues\n");

        err = xenbus_switch_state(dev, XenbusStateInitWait);
        if (err)
                goto fail;

        be->state = XenbusStateInitWait;

        /* This kicks hotplug scripts, so do it immediately. */
        err = backend_create_xenvif(be);
        if (err)
                goto fail;

        return 0;

abort_transaction:
        xenbus_transaction_end(xbt, 1);
        xenbus_dev_fatal(dev, err, "%s", message);
fail:
        pr_debug("failed\n");
        netback_remove(dev);
        return err;
}


/*
 * Handle the creation of the hotplug script environment.  We add the script
 * and vif variables to the environment, for the benefit of the vif-* hotplug
 * scripts.
 */
static int netback_uevent(struct xenbus_device *xdev,
                          struct kobj_uevent_env *env)
{
        struct backend_info *be = dev_get_drvdata(&xdev->dev);
        char *val;

        val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
        if (IS_ERR(val)) {
                int err = PTR_ERR(val);
                xenbus_dev_fatal(xdev, err, "reading script");
                return err;
        } else {
                if (add_uevent_var(env, "script=%s", val)) {
                        kfree(val);
                        return -ENOMEM;
                }
                kfree(val);
        }

        if (!be || !be->vif)
                return 0;

        return add_uevent_var(env, "vif=%s", be->vif->dev->name);
}


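/* Read the device handle from the backend's xenstore node and allocate the
 * matching xenvif. The KOBJ_ONLINE uevent lets hotplug scripts run.
 */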
static int backend_create_xenvif(struct backend_info *be)
{
        int err;
        long handle;
        struct xenbus_device *dev = be->dev;
        struct xenvif *vif;

        if (be->vif != NULL)
                return 0;

        err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
        if (err != 1) {
                xenbus_dev_fatal(dev, err, "reading handle");
                return (err < 0) ? err : -EINVAL;
        }

        vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
        if (IS_ERR(vif)) {
                err = PTR_ERR(vif);
                xenbus_dev_fatal(dev, err, "creating interface");
                return err;
        }
        be->vif = vif;

        kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
        return 0;
}

static void backend_disconnect(struct backend_info *be)
{
        if (be->vif) {
                xen_unregister_watchers(be->vif);
#ifdef CONFIG_DEBUG_FS
                xenvif_debugfs_delif(be->vif);
#endif /* CONFIG_DEBUG_FS */
                xenvif_disconnect(be->vif);
        }
}

static void backend_connect(struct backend_info *be)
{
        if (be->vif)
                connect(be);
}

static inline void backend_switch_state(struct backend_info *be,
                                        enum xenbus_state state)
{
        struct xenbus_device *dev = be->dev;

        pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state));
        be->state = state;

        /* If we are waiting for a hotplug script then defer the
         * actual xenbus state change.
         */
        if (!be->have_hotplug_status_watch)
                xenbus_switch_state(dev, state);
}

/* Handle backend state transitions:
 *
 * The backend state starts in InitWait and the following transitions are
 * allowed.
 *
 * InitWait -> Connected
 *
 *    ^    \         |
 *    |     \        |
 *    |      \       |
 *    |       \      |
 *    |        \     |
 *    |         \    |
 *    |          V   V
 *
 *  Closed  <-> Closing
 *
 * The state argument specifies the eventual state of the backend and the
 * function transitions to that state via the shortest path.
 */
static void set_backend_state(struct backend_info *be,
                              enum xenbus_state state)
{
        while (be->state != state) {
                switch (be->state) {
                case XenbusStateClosed:
                        switch (state) {
                        case XenbusStateInitWait:
                        case XenbusStateConnected:
                                pr_info("%s: prepare for reconnect\n",
                                        be->dev->nodename);
                                backend_switch_state(be, XenbusStateInitWait);
                                break;
                        case XenbusStateClosing:
                                backend_switch_state(be, XenbusStateClosing);
                                break;
                        default:
                                BUG();
                        }
                        break;
                case XenbusStateInitWait:
                        switch (state) {
                        case XenbusStateConnected:
                                backend_connect(be);
                                backend_switch_state(be, XenbusStateConnected);
                                break;
                        case XenbusStateClosing:
                        case XenbusStateClosed:
                                backend_switch_state(be, XenbusStateClosing);
                                break;
                        default:
                                BUG();
                        }
                        break;
                case XenbusStateConnected:
                        switch (state) {
                        case XenbusStateInitWait:
                        case XenbusStateClosing:
                        case XenbusStateClosed:
                                backend_disconnect(be);
                                backend_switch_state(be, XenbusStateClosing);
                                break;
                        default:
                                BUG();
                        }
                        break;
                case XenbusStateClosing:
                        switch (state) {
                        case XenbusStateInitWait:
                        case XenbusStateConnected:
                        case XenbusStateClosed:
                                backend_switch_state(be, XenbusStateClosed);
                                break;
                        default:
                                BUG();
                        }
                        break;
                default:
                        BUG();
                }
        }
}

/**
 * Callback received when the frontend's state changes.
 */
static void frontend_changed(struct xenbus_device *dev,
                             enum xenbus_state frontend_state)
{
        struct backend_info *be = dev_get_drvdata(&dev->dev);

        pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state));

        be->frontend_state = frontend_state;

        switch (frontend_state) {
        case XenbusStateInitialising:
                set_backend_state(be, XenbusStateInitWait);
                break;

        case XenbusStateInitialised:
                break;

        case XenbusStateConnected:
                set_backend_state(be, XenbusStateConnected);
                break;

        case XenbusStateClosing:
                set_backend_state(be, XenbusStateClosing);
                break;

        case XenbusStateClosed:
                set_backend_state(be, XenbusStateClosed);
                if (xenbus_dev_is_online(dev))
                        break;
                /* fall through if not online */
        case XenbusStateUnknown:
                set_backend_state(be, XenbusStateClosed);
                device_unregister(&dev->dev);
                break;

        default:
                xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
                                 frontend_state);
                break;
        }
}


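/* Parse the optional "rate" node ("<bytes>,<usec>") used for credit-based
 * rate limiting. Defaults to unlimited bandwidth if absent or malformed.
 */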
static void xen_net_read_rate(struct xenbus_device *dev,
                              unsigned long *bytes, unsigned long *usec)
{
        char *s, *e;
        unsigned long b, u;
        char *ratestr;

        /* Default to unlimited bandwidth. */
        *bytes = ~0UL;
        *usec = 0;

        ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
        if (IS_ERR(ratestr))
                return;

        s = ratestr;
        b = simple_strtoul(s, &e, 10);
        if ((s == e) || (*e != ','))
                goto fail;

        s = e + 1;
        u = simple_strtoul(s, &e, 10);
        if ((s == e) || (*e != '\0'))
                goto fail;

        *bytes = b;
        *usec = u;

        kfree(ratestr);
        return;

 fail:
        pr_warn("Failed to parse network rate limit. Traffic unlimited.\n");
        kfree(ratestr);
}

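/* Parse the "mac" node ("xx:xx:xx:xx:xx:xx") into a binary MAC address. */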
static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
        char *s, *e, *macstr;
        int i;

        macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
        if (IS_ERR(macstr))
                return PTR_ERR(macstr);

        for (i = 0; i < ETH_ALEN; i++) {
                mac[i] = simple_strtoul(s, &e, 16);
                if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
                        kfree(macstr);
                        return -ENOENT;
                }
                s = e+1;
        }

        kfree(macstr);
        return 0;
}

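/* Watch callback: re-read the rate limit and apply the new credit
 * parameters to every queue.
 */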
static void xen_net_rate_changed(struct xenbus_watch *watch,
                                const char **vec, unsigned int len)
{
        struct xenvif *vif = container_of(watch, struct xenvif, credit_watch);
        struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
        unsigned long   credit_bytes;
        unsigned long   credit_usec;
        unsigned int queue_index;

        xen_net_read_rate(dev, &credit_bytes, &credit_usec);
        for (queue_index = 0; queue_index < vif->num_queues; queue_index++) {
                struct xenvif_queue *queue = &vif->queues[queue_index];

                queue->credit_bytes = credit_bytes;
                queue->credit_usec = credit_usec;
                if (!mod_timer_pending(&queue->credit_timeout, jiffies) &&
                        queue->remaining_credit > queue->credit_bytes) {
                        queue->remaining_credit = queue->credit_bytes;
                }
        }
}

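/* Register a xenbus watch on the backend's "rate" node so rate-limit
 * changes take effect without reconnecting the frontend.
 */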
static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
{
        int err = 0;
        char *node;
        unsigned maxlen = strlen(dev->nodename) + sizeof("/rate");

        node = kmalloc(maxlen, GFP_KERNEL);
        if (!node)
                return -ENOMEM;
        snprintf(node, maxlen, "%s/rate", dev->nodename);
        vif->credit_watch.node = node;
        vif->credit_watch.callback = xen_net_rate_changed;
        err = register_xenbus_watch(&vif->credit_watch);
        if (err) {
                pr_err("Failed to set watcher %s\n", vif->credit_watch.node);
                kfree(node);
                vif->credit_watch.node = NULL;
                vif->credit_watch.callback = NULL;
        }
        return err;
}

static void xen_unregister_watchers(struct xenvif *vif)
{
        if (vif->credit_watch.node) {
                unregister_xenbus_watch(&vif->credit_watch);
                kfree(vif->credit_watch.node);
                vif->credit_watch.node = NULL;
        }
}

static void unregister_hotplug_status_watch(struct backend_info *be)
{
        if (be->have_hotplug_status_watch) {
                unregister_xenbus_watch(&be->hotplug_status_watch);
                kfree(be->hotplug_status_watch.node);
        }
        be->have_hotplug_status_watch = 0;
}

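/* Watch callback for "hotplug-status": once the hotplug script reports
 * "connected", publish the deferred backend state and drop the watch.
 */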
static void hotplug_status_changed(struct xenbus_watch *watch,
                                   const char **vec,
                                   unsigned int vec_size)
{
        struct backend_info *be = container_of(watch,
                                               struct backend_info,
                                               hotplug_status_watch);
        char *str;
        unsigned int len;

        str = xenbus_read(XBT_NIL, be->dev->nodename, "hotplug-status", &len);
        if (IS_ERR(str))
                return;
        if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
                /* Complete any pending state change */
                xenbus_switch_state(be->dev, be->state);

                /* Not interested in this watch anymore. */
                unregister_hotplug_status_watch(be);
        }
        kfree(str);
}

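/* Bring the backend up: read frontend parameters from xenstore, allocate
 * and initialise the requested number of queues, connect their rings and
 * event channels, and finally turn the carrier on.
 */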
static void connect(struct backend_info *be)
{
        int err;
        struct xenbus_device *dev = be->dev;
        unsigned long credit_bytes, credit_usec;
        unsigned int queue_index;
        unsigned int requested_num_queues;
        struct xenvif_queue *queue;

        /* Check whether the frontend requested multiple queues
         * and read the number requested.
         */
        err = xenbus_scanf(XBT_NIL, dev->otherend,
                           "multi-queue-num-queues",
                           "%u", &requested_num_queues);
        if (err < 0) {
                requested_num_queues = 1; /* Fall back to single queue */
        } else if (requested_num_queues > xenvif_max_queues) {
                /* buggy or malicious guest */
                xenbus_dev_fatal(dev, err,
                                 "guest requested %u queues, exceeding the maximum of %u.",
                                 requested_num_queues, xenvif_max_queues);
                return;
        }

        err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
        if (err) {
                xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
                return;
        }

        xen_net_read_rate(dev, &credit_bytes, &credit_usec);
        xen_register_watchers(dev, be->vif);
        read_xenbus_vif_flags(be);

        /* Use the number of queues requested by the frontend */
        be->vif->queues = vzalloc(requested_num_queues *
                                  sizeof(struct xenvif_queue));
        if (!be->vif->queues) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating queues");
                return;
        }
        be->vif->num_queues = requested_num_queues;
        be->vif->stalled_queues = requested_num_queues;

        for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
                queue = &be->vif->queues[queue_index];
                queue->vif = be->vif;
                queue->id = queue_index;
                snprintf(queue->name, sizeof(queue->name), "%s-q%u",
                                be->vif->dev->name, queue->id);

                err = xenvif_init_queue(queue);
                if (err) {
                        /* xenvif_init_queue() cleans up after itself on
                         * failure, but we need to clean up any previously
                         * initialised queues. Set num_queues to i so that
                         * earlier queues can be destroyed using the regular
                         * disconnect logic.
                         */
                        be->vif->num_queues = queue_index;
                        goto err;
                }

                queue->remaining_credit = credit_bytes;
                queue->credit_usec = credit_usec;

                err = connect_rings(be, queue);
                if (err) {
                        /* connect_rings() cleans up after itself on failure,
                         * but we need to clean up after xenvif_init_queue() here,
                         * and also clean up any previously initialised queues.
                         */
                        xenvif_deinit_queue(queue);
                        be->vif->num_queues = queue_index;
                        goto err;
                }
        }

#ifdef CONFIG_DEBUG_FS
        xenvif_debugfs_addif(be->vif);
#endif /* CONFIG_DEBUG_FS */

        /* Initialisation completed, tell core driver the number of
         * active queues.
         */
        rtnl_lock();
        netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
        netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
        rtnl_unlock();

        xenvif_carrier_on(be->vif);

        unregister_hotplug_status_watch(be);
        err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
                                   hotplug_status_changed,
                                   "%s/%s", dev->nodename, "hotplug-status");
        if (!err)
                be->have_hotplug_status_watch = 1;

        netif_tx_wake_all_queues(be->vif->dev);

        return;

err:
        if (be->vif->num_queues > 0)
                xenvif_disconnect(be->vif); /* Clean up existing queues */
        vfree(be->vif->queues);
        be->vif->queues = NULL;
        be->vif->num_queues = 0;
        return;
}


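/* Read one queue's ring references and event channel(s) from xenstore
 * (either the frontend's toplevel directory or its queue-N subdirectory
 * for multi-queue) and map them into the backend.
 */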
static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
{
        struct xenbus_device *dev = be->dev;
        unsigned int num_queues = queue->vif->num_queues;
        unsigned long tx_ring_ref, rx_ring_ref;
        unsigned int tx_evtchn, rx_evtchn;
        int err;
        char *xspath;
        size_t xspathsize;
        const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */

        /* If the frontend requested 1 queue, or we have fallen back
         * to single queue due to lack of frontend support for multi-
         * queue, expect the remaining XenStore keys in the toplevel
         * directory. Otherwise, expect them in a subdirectory called
         * queue-N.
         */
        if (num_queues == 1) {
                xspath = kzalloc(strlen(dev->otherend) + 1, GFP_KERNEL);
                if (!xspath) {
                        xenbus_dev_fatal(dev, -ENOMEM,
                                         "reading ring references");
                        return -ENOMEM;
                }
                strcpy(xspath, dev->otherend);
        } else {
                xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
                xspath = kzalloc(xspathsize, GFP_KERNEL);
                if (!xspath) {
                        xenbus_dev_fatal(dev, -ENOMEM,
                                         "reading ring references");
                        return -ENOMEM;
                }
                snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend,
                         queue->id);
        }

        err = xenbus_gather(XBT_NIL, xspath,
                            "tx-ring-ref", "%lu", &tx_ring_ref,
                            "rx-ring-ref", "%lu", &rx_ring_ref, NULL);
        if (err) {
                xenbus_dev_fatal(dev, err,
                                 "reading %s/ring-ref",
                                 xspath);
                goto err;
        }

        /* Try split event channels first, then single event channel. */
        err = xenbus_gather(XBT_NIL, xspath,
                            "event-channel-tx", "%u", &tx_evtchn,
                            "event-channel-rx", "%u", &rx_evtchn, NULL);
        if (err < 0) {
                err = xenbus_scanf(XBT_NIL, xspath,
                                   "event-channel", "%u", &tx_evtchn);
                if (err < 0) {
                        xenbus_dev_fatal(dev, err,
                                         "reading %s/event-channel(-tx/rx)",
                                         xspath);
                        goto err;
                }
                rx_evtchn = tx_evtchn;
        }

        /* Map the shared frame, irq etc. */
        err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref,
                             tx_evtchn, rx_evtchn);
        if (err) {
                xenbus_dev_fatal(dev, err,
                                 "mapping shared-frames %lu/%lu port tx %u rx %u",
                                 tx_ring_ref, rx_ring_ref,
                                 tx_evtchn, rx_evtchn);
                goto err;
        }

        err = 0;
err: /* Regular return falls through with err == 0 */
        kfree(xspath);
        return err;
}

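/* Read the frontend's feature flags (rx-copy, rx-notify, sg, GSO and
 * checksum offload) and configure the vif accordingly.
 */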
static int read_xenbus_vif_flags(struct backend_info *be)
{
        struct xenvif *vif = be->vif;
        struct xenbus_device *dev = be->dev;
        unsigned int rx_copy;
        int err, val;

        err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
                           &rx_copy);
        if (err == -ENOENT) {
                err = 0;
                rx_copy = 0;
        }
        if (err < 0) {
                xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
                                 dev->otherend);
                return err;
        }
        if (!rx_copy)
                return -EOPNOTSUPP;

        if (xenbus_scanf(XBT_NIL, dev->otherend,
                         "feature-rx-notify", "%d", &val) < 0)
                val = 0;
        if (!val) {
                /* - Reduce drain timeout to poll more frequently for
                 *   Rx requests.
                 * - Disable Rx stall detection.
                 */
                be->vif->drain_timeout = msecs_to_jiffies(30);
                be->vif->stall_timeout = 0;
        }

        if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
                         "%d", &val) < 0)
                val = 0;
        vif->can_sg = !!val;

        vif->gso_mask = 0;
        vif->gso_prefix_mask = 0;

        if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
                         "%d", &val) < 0)
                val = 0;
        if (val)
                vif->gso_mask |= GSO_BIT(TCPV4);

        if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
                         "%d", &val) < 0)
                val = 0;
        if (val)
                vif->gso_prefix_mask |= GSO_BIT(TCPV4);

        if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
                         "%d", &val) < 0)
                val = 0;
        if (val)
                vif->gso_mask |= GSO_BIT(TCPV6);

        if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
                         "%d", &val) < 0)
                val = 0;
        if (val)
                vif->gso_prefix_mask |= GSO_BIT(TCPV6);

        if (vif->gso_mask & vif->gso_prefix_mask) {
                xenbus_dev_fatal(dev, err,
                                 "%s: gso and gso prefix flags are not "
                                 "mutually exclusive",
                                 dev->otherend);
                return -EOPNOTSUPP;
        }

        if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
                         "%d", &val) < 0)
                val = 0;
        vif->ip_csum = !val;

        if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload",
                         "%d", &val) < 0)
                val = 0;
        vif->ipv6_csum = !!val;

        return 0;
}

static const struct xenbus_device_id netback_ids[] = {
        { "vif" },
        { "" }
};

static struct xenbus_driver netback_driver = {
        .ids = netback_ids,
        .probe = netback_probe,
        .remove = netback_remove,
        .uevent = netback_uevent,
        .otherend_changed = frontend_changed,
};

int xenvif_xenbus_init(void)
{
        return xenbus_register_backend(&netback_driver);
}

void xenvif_xenbus_fini(void)
{
        return xenbus_unregister_driver(&netback_driver);
}