IPoIB: Move ipoib_ib_dev_flush() to ipoib workqueue
[linux-block.git] drivers/infiniband/ulp/ipoib/ipoib_main.c
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_main.c 1377 2004-12-23 19:57:12Z roland $
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>       /* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <net/dst.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
        struct net_device *dev;
        struct ipoib_path  path;
};

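/*
 * Default IPoIB broadcast hardware address: a 4-byte QPN field
 * (the broadcast QPN 0xffffff) followed by the 16-byte IPv4 broadcast
 * MGID ff12:401b:<pkey>::ffff:ffff.  The P_Key bytes (offsets 8 and 9)
 * are left zero here and are filled in with the port's P_Key when the
 * interface is created (see ipoib_add_port()).
 */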
static const u8 ipv4_bcast_addr[] = {
        0x00, 0xff, 0xff, 0xff,
        0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
        .name   = "ipoib",
        .add    = ipoib_add_one,
        .remove = ipoib_remove_one
};

int ipoib_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "bringing up interface\n");

        set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        if (ipoib_pkey_dev_delay_open(dev))
                return 0;

        if (ipoib_ib_dev_open(dev))
                return -EINVAL;

        if (ipoib_ib_dev_up(dev)) {
                ipoib_ib_dev_stop(dev);
                return -EINVAL;
        }

        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;

                /* Bring up any child interfaces too */
                mutex_lock(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;

                        flags = cpriv->dev->flags;
                        if (flags & IFF_UP)
                                continue;

                        dev_change_flags(cpriv->dev, flags | IFF_UP);
                }
                mutex_unlock(&priv->vlan_mutex);
        }

        netif_start_queue(dev);

        return 0;
}

static int ipoib_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "stopping interface\n");

        clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        netif_stop_queue(dev);

        /*
         * Now flush workqueue to make sure a scheduled task doesn't
         * bring our internal state back up.
         */
        flush_workqueue(ipoib_workqueue);

        ipoib_ib_dev_down(dev, 1);
        ipoib_ib_dev_stop(dev);

        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;

                /* Bring down any child interfaces too */
                mutex_lock(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;

                        flags = cpriv->dev->flags;
                        if (!(flags & IFF_UP))
                                continue;

                        dev_change_flags(cpriv->dev, flags & ~IFF_UP);
                }
                mutex_unlock(&priv->vlan_mutex);
        }

        return 0;
}

static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
                return -EINVAL;

        priv->admin_mtu = new_mtu;

        dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

        return 0;
}

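/*
 * Path records learned from the subnet administrator are cached in an
 * rb-tree keyed by destination GID.  __path_find() and __path_add()
 * must be called with priv->lock held.
 */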
static struct ipoib_path *__path_find(struct net_device *dev,
                                      union ib_gid *gid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node *n = priv->path_tree.rb_node;
        struct ipoib_path *path;
        int ret;

        while (n) {
                path = rb_entry(n, struct ipoib_path, rb_node);

                ret = memcmp(gid->raw, path->pathrec.dgid.raw,
                             sizeof (union ib_gid));

                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else
                        return path;
        }

        return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node **n = &priv->path_tree.rb_node;
        struct rb_node *pn = NULL;
        struct ipoib_path *tpath;
        int ret;

        while (*n) {
                pn = *n;
                tpath = rb_entry(pn, struct ipoib_path, rb_node);

                ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
                             sizeof (union ib_gid));
                if (ret < 0)
                        n = &pn->rb_left;
                else if (ret > 0)
                        n = &pn->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(&path->rb_node, pn, n);
        rb_insert_color(&path->rb_node, &priv->path_tree);

        list_add_tail(&path->list, &priv->path_list);

        return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh, *tn;
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = __skb_dequeue(&path->queue)))
                dev_kfree_skb_irq(skb);

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
                /*
                 * It's safe to call ipoib_put_ah() inside priv->lock
                 * here, because we know that path->ah will always
                 * hold one more reference, so ipoib_put_ah() will
                 * never do more than decrement the ref count.
                 */
                if (neigh->ah)
                        ipoib_put_ah(neigh->ah);
                *to_ipoib_neigh(neigh->neighbour) = NULL;
                neigh->neighbour->ops->destructor = NULL;
                kfree(neigh);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (path->ah)
                ipoib_put_ah(path->ah);

        kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
        struct ipoib_path_iter *iter;

        iter = kmalloc(sizeof *iter, GFP_KERNEL);
        if (!iter)
                return NULL;

        iter->dev = dev;
        memset(iter->path.pathrec.dgid.raw, 0, 16);

        if (ipoib_path_iter_next(iter)) {
                kfree(iter);
                return NULL;
        }

        return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
        struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
        struct rb_node *n;
        struct ipoib_path *path;
        int ret = 1;

        spin_lock_irq(&priv->lock);

        n = rb_first(&priv->path_tree);

        while (n) {
                path = rb_entry(n, struct ipoib_path, rb_node);

                if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
                           sizeof (union ib_gid)) < 0) {
                        iter->path = *path;
                        ret = 0;
                        break;
                }

                n = rb_next(n);
        }

        spin_unlock_irq(&priv->lock);

        return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
                          struct ipoib_path *path)
{
        *path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_flush_paths(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path, *tp;
        LIST_HEAD(remove_list);
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        list_splice(&priv->path_list, &remove_list);
        INIT_LIST_HEAD(&priv->path_list);

        list_for_each_entry(path, &remove_list, list)
                rb_erase(&path->rb_node, &priv->path_tree);

        spin_unlock_irqrestore(&priv->lock, flags);

        list_for_each_entry_safe(path, tp, &remove_list, list) {
                if (path->query)
                        ib_sa_cancel_query(path->query_id, path->query);
                wait_for_completion(&path->done);
                path_free(dev, path);
        }
}

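/*
 * Completion callback for an SA path record query: build an address
 * handle from the returned path record (if the query succeeded) and
 * requeue any packets that were waiting for path resolution.
 */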
static void path_rec_completion(int status,
                                struct ib_sa_path_rec *pathrec,
                                void *path_ptr)
{
        struct ipoib_path *path = path_ptr;
        struct net_device *dev = path->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah = NULL;
        struct ipoib_neigh *neigh;
        struct sk_buff_head skqueue;
        struct sk_buff *skb;
        unsigned long flags;

        if (pathrec)
                ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
                          be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
        else
                ipoib_dbg(priv, "PathRec status %d for GID " IPOIB_GID_FMT "\n",
                          status, IPOIB_GID_ARG(path->pathrec.dgid));

        skb_queue_head_init(&skqueue);

        if (!status) {
                struct ib_ah_attr av = {
                        .dlid          = be16_to_cpu(pathrec->dlid),
                        .sl            = pathrec->sl,
                        .port_num      = priv->port
                };
                int path_rate = ib_sa_rate_enum_to_int(pathrec->rate);

                if (path_rate > 0 && priv->local_rate > path_rate)
                        av.static_rate = (priv->local_rate - 1) / path_rate;

                ipoib_dbg(priv, "static_rate %d for local port %dX, path %dX\n",
                          av.static_rate, priv->local_rate,
                          ib_sa_rate_enum_to_int(pathrec->rate));

                ah = ipoib_create_ah(dev, priv->pd, &av);
        }

        spin_lock_irqsave(&priv->lock, flags);

        path->ah = ah;

        if (ah) {
                path->pathrec = *pathrec;

                ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
                          ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

                while ((skb = __skb_dequeue(&path->queue)))
                        __skb_queue_tail(&skqueue, skb);

                list_for_each_entry(neigh, &path->neigh_list, list) {
                        kref_get(&path->ah->ref);
                        neigh->ah = path->ah;

                        while ((skb = __skb_dequeue(&neigh->queue)))
                                __skb_queue_tail(&skqueue, skb);
                }
        }

        path->query = NULL;
        complete(&path->done);

        spin_unlock_irqrestore(&priv->lock, flags);

        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = dev;
                if (dev_queue_xmit(skb))
                        ipoib_warn(priv, "dev_queue_xmit failed "
                                   "to requeue packet\n");
        }
}

static struct ipoib_path *path_rec_create(struct net_device *dev,
                                          union ib_gid *gid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;

        path = kzalloc(sizeof *path, GFP_ATOMIC);
        if (!path)
                return NULL;

        path->dev = dev;

        skb_queue_head_init(&path->queue);

        INIT_LIST_HEAD(&path->neigh_list);

        memcpy(path->pathrec.dgid.raw, gid->raw, sizeof (union ib_gid));
        path->pathrec.sgid      = priv->local_gid;
        path->pathrec.pkey      = cpu_to_be16(priv->pkey);
        path->pathrec.numb_path = 1;

        return path;
}

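/*
 * Kick off an asynchronous SA path record query for this path;
 * path_rec_completion() runs when the query completes or fails.
 */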
static int path_rec_start(struct net_device *dev,
                          struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n",
                  IPOIB_GID_ARG(path->pathrec.dgid));

        init_completion(&path->done);

        path->query_id =
                ib_sa_path_rec_get(priv->ca, priv->port,
                                   &path->pathrec,
                                   IB_SA_PATH_REC_DGID          |
                                   IB_SA_PATH_REC_SGID          |
                                   IB_SA_PATH_REC_NUMB_PATH     |
                                   IB_SA_PATH_REC_PKEY,
                                   1000, GFP_ATOMIC,
                                   path_rec_completion,
                                   path, &path->query);
        if (path->query_id < 0) {
                ipoib_warn(priv, "ib_sa_path_rec_get failed\n");
                path->query = NULL;
                return path->query_id;
        }

        return 0;
}

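/*
 * Allocate an ipoib_neigh for a new neighbour entry, attach it to the
 * kernel's struct neighbour via to_ipoib_neigh(), and either send the
 * skb immediately (if the path already has an address handle) or queue
 * it until the path record query completes.
 */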
static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;
        struct ipoib_neigh *neigh;

        neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
        if (!neigh) {
                ++priv->stats.tx_dropped;
                dev_kfree_skb_any(skb);
                return;
        }

        skb_queue_head_init(&neigh->queue);
        neigh->neighbour = skb->dst->neighbour;
        *to_ipoib_neigh(skb->dst->neighbour) = neigh;

        /*
         * We can only be called from ipoib_start_xmit, so we're
         * inside tx_lock -- no need to save/restore flags.
         */
        spin_lock(&priv->lock);

        path = __path_find(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4));
        if (!path) {
                path = path_rec_create(dev,
                                       (union ib_gid *) (skb->dst->neighbour->ha + 4));
                if (!path)
                        goto err;

                __path_add(dev, path);
        }

        list_add_tail(&neigh->list, &path->neigh_list);

        if (path->ah) {
                kref_get(&path->ah->ref);
                neigh->ah = path->ah;

                ipoib_send(dev, skb, path->ah,
                           be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
        } else {
                neigh->ah  = NULL;
                if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                        __skb_queue_tail(&neigh->queue, skb);
                } else {
                        ++priv->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }

                if (!path->query && path_rec_start(dev, path))
                        goto err;
        }

        spin_unlock(&priv->lock);
        return;

err:
        *to_ipoib_neigh(skb->dst->neighbour) = NULL;
        list_del(&neigh->list);
        neigh->neighbour->ops->destructor = NULL;
        kfree(neigh);

        ++priv->stats.tx_dropped;
        dev_kfree_skb_any(skb);

        spin_unlock(&priv->lock);
}

static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

        /* Look up path record for unicasts */
        if (skb->dst->neighbour->ha[4] != 0xff) {
                neigh_add_path(skb, dev);
                return;
        }

        /* Add in the P_Key for multicasts */
        skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
        skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
        ipoib_mcast_send(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4), skb);
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
                             struct ipoib_pseudoheader *phdr)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;

        /*
         * We can only be called from ipoib_start_xmit, so we're
         * inside tx_lock -- no need to save/restore flags.
         */
        spin_lock(&priv->lock);

        path = __path_find(dev, (union ib_gid *) (phdr->hwaddr + 4));
        if (!path) {
                path = path_rec_create(dev,
                                       (union ib_gid *) (phdr->hwaddr + 4));
                if (path) {
                        /* put pseudoheader back on for next time */
                        skb_push(skb, sizeof *phdr);
                        __skb_queue_tail(&path->queue, skb);

                        if (path_rec_start(dev, path)) {
                                spin_unlock(&priv->lock);
                                path_free(dev, path);
                                return;
                        } else
                                __path_add(dev, path);
                } else {
                        ++priv->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }

                spin_unlock(&priv->lock);
                return;
        }

        if (path->ah) {
                ipoib_dbg(priv, "Send unicast ARP to %04x\n",
                          be16_to_cpu(path->pathrec.dlid));

                ipoib_send(dev, skb, path->ah,
                           be32_to_cpup((__be32 *) phdr->hwaddr));
        } else if ((path->query || !path_rec_start(dev, path)) &&
                   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                /* put pseudoheader back on for next time */
                skb_push(skb, sizeof *phdr);
                __skb_queue_tail(&path->queue, skb);
        } else {
                ++priv->stats.tx_dropped;
                dev_kfree_skb_any(skb);
        }

        spin_unlock(&priv->lock);
}

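/*
 * Transmit entry point.  Three cases are handled: an skb whose
 * neighbour already carries a cached ipoib_neigh with an address
 * handle is sent directly; one with a neighbour but no ipoib_neigh
 * triggers a path lookup; and one without a neighbour (e.g. an ARP or
 * RARP reply) is dispatched by the destination address that
 * ipoib_hard_header() stashed in the pseudoheader at the front of
 * the skb.
 */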
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh;
        unsigned long flags;

        if (!spin_trylock_irqsave(&priv->tx_lock, flags))
                return NETDEV_TX_LOCKED;

        /*
         * Check if our queue is stopped.  Since we have the LLTX bit
         * set, we can't rely on netif_stop_queue() preventing our
         * xmit function from being called with a full queue.
         */
        if (unlikely(netif_queue_stopped(dev))) {
                spin_unlock_irqrestore(&priv->tx_lock, flags);
                return NETDEV_TX_BUSY;
        }

        if (skb->dst && skb->dst->neighbour) {
                if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
                        ipoib_path_lookup(skb, dev);
                        goto out;
                }

                neigh = *to_ipoib_neigh(skb->dst->neighbour);

                if (likely(neigh->ah)) {
                        ipoib_send(dev, skb, neigh->ah,
                                   be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
                        goto out;
                }

                if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                        spin_lock(&priv->lock);
                        __skb_queue_tail(&neigh->queue, skb);
                        spin_unlock(&priv->lock);
                } else {
                        ++priv->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }
        } else {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb->data;
                skb_pull(skb, sizeof *phdr);

                if (phdr->hwaddr[4] == 0xff) {
                        /* Add in the P_Key for multicast */
                        phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
                        phdr->hwaddr[9] = priv->pkey & 0xff;

                        ipoib_mcast_send(dev, (union ib_gid *) (phdr->hwaddr + 4), skb);
                } else {
                        /* unicast GID -- should be ARP or RARP reply */

                        if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
                            (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
                                ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
                                           IPOIB_GID_FMT "\n",
                                           skb->dst ? "neigh" : "dst",
                                           be16_to_cpup((__be16 *) skb->data),
                                           be32_to_cpup((__be32 *) phdr->hwaddr),
                                           IPOIB_GID_ARG(*(union ib_gid *) (phdr->hwaddr + 4)));
                                dev_kfree_skb_any(skb);
                                ++priv->stats.tx_dropped;
                                goto out;
                        }

                        unicast_arp_send(skb, dev, phdr);
                }
        }

out:
        spin_unlock_irqrestore(&priv->tx_lock, flags);

        return NETDEV_TX_OK;
}

static struct net_device_stats *ipoib_get_stats(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        return &priv->stats;
}

static void ipoib_timeout(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
                   jiffies_to_msecs(jiffies - dev->trans_start));
        ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
                   netif_queue_stopped(dev),
                   priv->tx_head, priv->tx_tail);
        /* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
                             struct net_device *dev,
                             unsigned short type,
                             void *daddr, void *saddr, unsigned len)
{
        struct ipoib_header *header;

        header = (struct ipoib_header *) skb_push(skb, sizeof *header);

        header->proto = htons(type);
        header->reserved = 0;

        /*
         * If we don't have a neighbour structure, stuff the
         * destination address onto the front of the skb so we can
         * figure out where to send the packet later.
         */
        if (!skb->dst || !skb->dst->neighbour) {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
                memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
        }

        return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        queue_work(ipoib_workqueue, &priv->restart_task);
}

static void ipoib_neigh_destructor(struct neighbour *n)
{
        struct ipoib_neigh *neigh;
        struct ipoib_dev_priv *priv = netdev_priv(n->dev);
        unsigned long flags;
        struct ipoib_ah *ah = NULL;

        ipoib_dbg(priv,
                  "neigh_destructor for %06x " IPOIB_GID_FMT "\n",
                  be32_to_cpup((__be32 *) n->ha),
                  IPOIB_GID_ARG(*((union ib_gid *) (n->ha + 4))));

        spin_lock_irqsave(&priv->lock, flags);

        neigh = *to_ipoib_neigh(n);
        if (neigh) {
                if (neigh->ah)
                        ah = neigh->ah;
                list_del(&neigh->list);
                *to_ipoib_neigh(n) = NULL;
                kfree(neigh);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (ah)
                ipoib_put_ah(ah);
}

static int ipoib_neigh_setup(struct neighbour *neigh)
{
        /*
         * Is this kosher?  I can't find anybody in the kernel that
         * sets neigh->destructor, so we should be able to set it here
         * without trouble.
         */
        neigh->ops->destructor = ipoib_neigh_destructor;

        return 0;
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
        parms->neigh_setup = ipoib_neigh_setup;

        return 0;
}

int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /* Allocate RX/TX "rings" to hold queued skbs */

        priv->rx_ring = kzalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf),
                                GFP_KERNEL);
        if (!priv->rx_ring) {
                printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
                       ca->name, IPOIB_RX_RING_SIZE);
                goto out;
        }

        priv->tx_ring = kzalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf),
                                GFP_KERNEL);
        if (!priv->tx_ring) {
                printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
                       ca->name, IPOIB_TX_RING_SIZE);
                goto out_rx_ring_cleanup;
        }

        /* priv->tx_head & tx_tail are already 0 */

        if (ipoib_ib_dev_init(dev, ca, port))
                goto out_tx_ring_cleanup;

        return 0;

out_tx_ring_cleanup:
        kfree(priv->tx_ring);

out_rx_ring_cleanup:
        kfree(priv->rx_ring);

out:
        return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

        ipoib_delete_debug_files(dev);

        /* Delete any child interfaces first */
        list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
                unregister_netdev(cpriv->dev);
                ipoib_dev_cleanup(cpriv->dev);
                free_netdev(cpriv->dev);
        }

        ipoib_ib_dev_cleanup(dev);

        kfree(priv->rx_ring);
        kfree(priv->tx_ring);

        priv->rx_ring = NULL;
        priv->tx_ring = NULL;
}

static void ipoib_setup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        dev->open                = ipoib_open;
        dev->stop                = ipoib_stop;
        dev->change_mtu          = ipoib_change_mtu;
        dev->hard_start_xmit     = ipoib_start_xmit;
        dev->get_stats           = ipoib_get_stats;
        dev->tx_timeout          = ipoib_timeout;
        dev->hard_header         = ipoib_hard_header;
        dev->set_multicast_list  = ipoib_set_mcast_list;
        dev->neigh_setup         = ipoib_neigh_setup_dev;

        dev->watchdog_timeo      = HZ;

        dev->flags              |= IFF_BROADCAST | IFF_MULTICAST;

        /*
         * We add in INFINIBAND_ALEN to allow for the destination
         * address "pseudoheader" for skbs without neighbour struct.
         */
        dev->hard_header_len     = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
        dev->addr_len            = INFINIBAND_ALEN;
        dev->type                = ARPHRD_INFINIBAND;
        dev->tx_queue_len        = IPOIB_TX_RING_SIZE * 2;
        dev->features            = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;

        /* MTU will be reset when mcast join happens */
        dev->mtu                 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
        priv->mcast_mtu          = priv->admin_mtu = dev->mtu;

        memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

        netif_carrier_off(dev);

        SET_MODULE_OWNER(dev);

        priv->dev = dev;

        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->tx_lock);

        mutex_init(&priv->mcast_mutex);
        mutex_init(&priv->vlan_mutex);

        INIT_LIST_HEAD(&priv->path_list);
        INIT_LIST_HEAD(&priv->child_intfs);
        INIT_LIST_HEAD(&priv->dead_ahs);
        INIT_LIST_HEAD(&priv->multicast_list);

        INIT_WORK(&priv->pkey_task,    ipoib_pkey_poll,          priv->dev);
        INIT_WORK(&priv->mcast_task,   ipoib_mcast_join_task,    priv->dev);
        INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush,       priv->dev);
        INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
        INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah,            priv->dev);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
        struct net_device *dev;

        dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
                           ipoib_setup);
        if (!dev)
                return NULL;

        return netdev_priv(dev);
}

static ssize_t show_pkey(struct class_device *cdev, char *buf)
{
        struct ipoib_dev_priv *priv =
                netdev_priv(container_of(cdev, struct net_device, class_dev));

        return sprintf(buf, "0x%04x\n", priv->pkey);
}
static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

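/*
 * The create_child/delete_child attributes are driven from userspace,
 * for example (assuming the parent interface is ib0):
 *
 *   echo 0x8001 > /sys/class/net/ib0/create_child
 *   echo 0x8001 > /sys/class/net/ib0/delete_child
 *
 * which adds or removes a child interface (e.g. ib0.8001) on the
 * given P_Key.
 */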
static ssize_t create_child(struct class_device *cdev,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey < 0 || pkey > 0xffff)
                return -EINVAL;

        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
         */
        pkey |= 0x8000;

        ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev),
                             pkey);

        return ret ? ret : count;
}
static CLASS_DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);

static ssize_t delete_child(struct class_device *cdev,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey < 0 || pkey > 0xffff)
                return -EINVAL;

        ret = ipoib_vlan_delete(container_of(cdev, struct net_device, class_dev),
                                pkey);

        return ret ? ret : count;

}
static CLASS_DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
        return class_device_create_file(&dev->class_dev,
                                        &class_device_attr_pkey);
}

static struct net_device *ipoib_add_port(const char *format,
                                         struct ib_device *hca, u8 port)
{
        struct ipoib_dev_priv *priv;
        int result = -ENOMEM;

        priv = ipoib_intf_alloc(format);
        if (!priv)
                goto alloc_mem_failed;

        SET_NETDEV_DEV(priv->dev, hca->dma_device);

        result = ib_query_pkey(hca, port, 0, &priv->pkey);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto alloc_mem_failed;
        }

        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
         */
        priv->pkey |= 0x8000;

        priv->dev->broadcast[8] = priv->pkey >> 8;
        priv->dev->broadcast[9] = priv->pkey & 0xff;

        result = ib_query_gid(hca, port, 0, &priv->local_gid);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto alloc_mem_failed;
        } else
                memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));


        result = ipoib_dev_init(priv->dev, hca, port);
        if (result < 0) {
                printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
        }

        INIT_IB_EVENT_HANDLER(&priv->event_handler,
                              priv->ca, ipoib_event);
        result = ib_register_event_handler(&priv->event_handler);
        if (result < 0) {
                printk(KERN_WARNING "%s: ib_register_event_handler failed for "
                       "port %d (ret = %d)\n",
                       hca->name, port, result);
                goto event_failed;
        }

        result = register_netdev(priv->dev);
        if (result) {
                printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
                       hca->name, port, result);
                goto register_failed;
        }

        ipoib_create_debug_files(priv->dev);

        if (ipoib_add_pkey_attr(priv->dev))
                goto sysfs_failed;
        if (class_device_create_file(&priv->dev->class_dev,
                                     &class_device_attr_create_child))
                goto sysfs_failed;
        if (class_device_create_file(&priv->dev->class_dev,
                                     &class_device_attr_delete_child))
                goto sysfs_failed;

        return priv->dev;

sysfs_failed:
        ipoib_delete_debug_files(priv->dev);
        unregister_netdev(priv->dev);

register_failed:
        ib_unregister_event_handler(&priv->event_handler);
        flush_scheduled_work();

event_failed:
        ipoib_dev_cleanup(priv->dev);

device_init_failed:
        free_netdev(priv->dev);

alloc_mem_failed:
        return ERR_PTR(result);
}

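/*
 * ib_client add callback: create one IPoIB net_device per physical
 * port (or a single device on port 0 for a switch) and remember them
 * as per-device client data so ipoib_remove_one() can tear them down.
 */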
static void ipoib_add_one(struct ib_device *device)
{
        struct list_head *dev_list;
        struct net_device *dev;
        struct ipoib_dev_priv *priv;
        int s, e, p;

        dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
        if (!dev_list)
                return;

        INIT_LIST_HEAD(dev_list);

        if (device->node_type == IB_NODE_SWITCH) {
                s = 0;
                e = 0;
        } else {
                s = 1;
                e = device->phys_port_cnt;
        }

        for (p = s; p <= e; ++p) {
                dev = ipoib_add_port("ib%d", device, p);
                if (!IS_ERR(dev)) {
                        priv = netdev_priv(dev);
                        list_add_tail(&priv->list, dev_list);
                }
        }

        ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device)
{
        struct ipoib_dev_priv *priv, *tmp;
        struct list_head *dev_list;

        dev_list = ib_get_client_data(device, &ipoib_client);

        list_for_each_entry_safe(priv, tmp, dev_list, list) {
                ib_unregister_event_handler(&priv->event_handler);
                flush_scheduled_work();

                unregister_netdev(priv->dev);
                ipoib_dev_cleanup(priv->dev);
                free_netdev(priv->dev);
        }

        kfree(dev_list);
}

static int __init ipoib_init_module(void)
{
        int ret;

        ret = ipoib_register_debugfs();
        if (ret)
                return ret;

        /*
         * We create our own workqueue mainly because we want to be
         * able to flush it when devices are being removed.  We can't
         * use schedule_work()/flush_scheduled_work() because both
         * unregister_netdev() and linkwatch_event take the rtnl lock,
         * so flush_scheduled_work() can deadlock during device
         * removal.
         */
        ipoib_workqueue = create_singlethread_workqueue("ipoib");
        if (!ipoib_workqueue) {
                ret = -ENOMEM;
                goto err_fs;
        }

        ret = ib_register_client(&ipoib_client);
        if (ret)
                goto err_wq;

        return 0;

err_wq:
        destroy_workqueue(ipoib_workqueue);

err_fs:
        ipoib_unregister_debugfs();

        return ret;
}

static void __exit ipoib_cleanup_module(void)
{
        ib_unregister_client(&ipoib_client);
        ipoib_unregister_debugfs();
        destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);