/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/in.h>

#include <linux/jhash.h>
#include <net/arp.h>		/* for arp_tbl, used to size/pace the neigh GC */
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>
#define DRV_VERSION "1.0.0"

const char ipoib_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
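
/*
 * Note: both ring sizes are normalized in ipoib_init_module() below --
 * rounded up to a power of two and clamped to
 * [IPOIB_MIN_QUEUE_SIZE, IPOIB_MAX_QUEUE_SIZE]. For example (values
 * hypothetical):
 *
 *	modprobe ib_ipoib send_queue_size=100
 *
 * yields a 128-entry send ring.
 */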
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif
struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};
static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b,	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	0xff, 0xff, 0xff, 0xff
};
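
/*
 * The 20 bytes above form the link-layer broadcast address: a reserved
 * byte, the 3-byte broadcast QPN 0xffffff, and the IPv4 broadcast MGID
 * (ff12:401b::ffff:ffff, per the IPoIB spec, RFC 4391). Octets 8 and 9
 * are left zero here; ipoib_add_port() patches in the port's P_Key.
 */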
struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;
static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static void ipoib_neigh_reclaim(struct rcu_head *rp);
static struct net_device *ipoib_get_net_dev_by_params(
		struct ib_device *dev, u8 port, u16 pkey,
		const union ib_gid *gid, const struct sockaddr *addr,
		void *client_data);
static int ipoib_set_mac(struct net_device *dev, void *addr);
static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
		       int cmd);
static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one,
	.get_net_dev_by_params = ipoib_get_net_dev_by_params,
};
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int ipoib_netdev_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct netdev_notifier_info *ni = ptr;
	struct net_device *dev = ni->dev;

	if (dev->netdev_ops->ndo_open != ipoib_open)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		ipoib_create_debug_files(dev);
		break;
	case NETDEV_CHANGENAME:
		ipoib_delete_debug_files(dev);
		ipoib_create_debug_files(dev);
		break;
	case NETDEV_UNREGISTER:
		ipoib_delete_debug_files(dev);
		break;
	}

	return NOTIFY_DONE;
}
#endif
int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	netif_carrier_off(dev);

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	priv->sm_fullmember_sendonly_support = false;

	if (ipoib_ib_dev_open(dev)) {
		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
			return 0;
		goto err_disable;
	}

	ipoib_ib_dev_up(dev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		up_read(&priv->vlan_rwsem);
	}

	netif_start_queue(dev);

	return 0;

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	return -EINVAL;
}
static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev);
	ipoib_ib_dev_stop(dev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		up_read(&priv->vlan_rwsem);
	}

	return 0;
}
static void ipoib_uninit(struct net_device *dev)
{
	ipoib_dev_cleanup(dev);
}
static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);

	return features;
}
static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret = 0;

	/* dev->mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(dev)) {
		if (new_mtu > ipoib_cm_max_mtu(dev))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	if (priv->mcast_mtu < priv->admin_mtu)
		ipoib_dbg(priv, "MTU must be smaller than the underlying "
				"link layer MTU - 4 (%u)\n", priv->mcast_mtu);

	new_mtu = min(priv->mcast_mtu, priv->admin_mtu);

	if (priv->rn_ops->ndo_change_mtu) {
		bool carrier_status = netif_carrier_ok(dev);

		netif_carrier_off(dev);

		/* notify lower level on the real mtu */
		ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);

		if (carrier_status)
			netif_carrier_on(dev);
	} else {
		dev->mtu = new_mtu;
	}

	return ret;
}
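
/*
 * A worked example of the datagram-mode check above: with a 2048-byte
 * IB MTU, IPOIB_UD_MTU() leaves 2048 - 4 = 2044 bytes for the IP
 * packet, since the 4-byte IPoIB encapsulation header rides in the UD
 * payload. Connected mode is not bound by the IB MTU, so
 * ipoib_cm_admin_enabled() switches the limit to ipoib_cm_max_mtu().
 */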
static void ipoib_get_stats(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (priv->rn_ops->ndo_get_stats64)
		priv->rn_ops->ndo_get_stats64(dev, stats);
	else
		netdev_stats_to_stats64(stats, &dev->stats);
}
/* Called with an RCU read lock taken */
static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
					struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct in_device *in_dev;
	struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr;
	__be32 ret_addr;

	switch (addr->sa_family) {
	case AF_INET:
		in_dev = in_dev_get(dev);
		if (!in_dev)
			return false;

		ret_addr = inet_confirm_addr(net, in_dev, 0,
					     addr_in->sin_addr.s_addr,
					     RT_SCOPE_HOST);
		in_dev_put(in_dev);
		if (ret_addr)
			return true;

		break;
	case AF_INET6:
		if (IS_ENABLED(CONFIG_IPV6) &&
		    ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
			return true;

		break;
	}
	return false;
}
/**
 * Find the master net_device on top of the given net_device.
 * @dev: base IPoIB net_device
 *
 * Returns the master net_device with a reference held, or the same net_device
 * if no master exists.
 */
static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
{
	struct net_device *master;

	rcu_read_lock();
	master = netdev_master_upper_dev_get_rcu(dev);
	if (master)
		dev_hold(master);
	rcu_read_unlock();

	if (master)
		return master;

	dev_hold(dev);
	return dev;
}
struct ipoib_walk_data {
	const struct sockaddr *addr;
	struct net_device *result;
};

static int ipoib_upper_walk(struct net_device *upper, void *_data)
{
	struct ipoib_walk_data *data = _data;
	int ret = 0;

	if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) {
		dev_hold(upper);
		data->result = upper;
		ret = 1;
	}

	return ret;
}
/**
 * Find a net_device matching the given address, which is an upper device of
 * the given net_device.
 * @addr: IP address to look for.
 * @dev: base IPoIB net_device
 *
 * If found, returns the net_device with a reference held. Otherwise return
 * NULL.
 */
static struct net_device *ipoib_get_net_dev_match_addr(
		const struct sockaddr *addr, struct net_device *dev)
{
	struct ipoib_walk_data data = {
		.addr = addr,
	};

	rcu_read_lock();
	if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
		dev_hold(dev);
		data.result = dev;
		goto out;
	}

	netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &data);
out:
	rcu_read_unlock();
	return data.result;
}
/* returns the number of IPoIB netdevs on top a given ipoib device matching a
 * pkey_index and address, if one exists.
 *
 * @found_net_dev: contains a matching net_device if the return value >= 1,
 * with a reference held. */
static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
				     const union ib_gid *gid,
				     u16 pkey_index,
				     const struct sockaddr *addr,
				     int nesting,
				     struct net_device **found_net_dev)
{
	struct ipoib_dev_priv *child_priv;
	struct net_device *net_dev = NULL;
	int matches = 0;

	if (priv->pkey_index == pkey_index &&
	    (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
		if (!addr) {
			net_dev = ipoib_get_master_net_dev(priv->dev);
		} else {
			/* Verify the net_device matches the IP address, as
			 * IPoIB child devices currently share a GID. */
			net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
		}
		if (net_dev) {
			if (!*found_net_dev)
				*found_net_dev = net_dev;
			else
				dev_put(net_dev);
			++matches;
		}
	}

	/* Check child interfaces */
	down_read_nested(&priv->vlan_rwsem, nesting);
	list_for_each_entry(child_priv, &priv->child_intfs, list) {
		matches += ipoib_match_gid_pkey_addr(child_priv, gid,
						     pkey_index, addr,
						     nesting + 1,
						     found_net_dev);
		if (matches > 1)
			break;
	}
	up_read(&priv->vlan_rwsem);

	return matches;
}
/* Returns the number of matching net_devs found (between 0 and 2). Also
 * return the matching net_device in the @net_dev parameter, holding a
 * reference to the net_device, if the number of matches >= 1 */
static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
					 u16 pkey_index,
					 const union ib_gid *gid,
					 const struct sockaddr *addr,
					 struct net_device **net_dev)
{
	struct ipoib_dev_priv *priv;
	int matches = 0;

	*net_dev = NULL;

	list_for_each_entry(priv, dev_list, list) {
		if (priv->port != port)
			continue;

		matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
						     addr, 0, net_dev);
		if (matches > 1)
			break;
	}

	return matches;
}
static struct net_device *ipoib_get_net_dev_by_params(
		struct ib_device *dev, u8 port, u16 pkey,
		const union ib_gid *gid, const struct sockaddr *addr,
		void *client_data)
{
	struct net_device *net_dev;
	struct list_head *dev_list = client_data;
	u16 pkey_index;
	int matches;
	int ret;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
	if (ret)
		return NULL;

	if (!dev_list)
		return NULL;

	/* See if we can find a unique device matching the L2 parameters */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
						gid, NULL, &net_dev);
	switch (matches) {
	case 0:
		return NULL;
	case 1:
		return net_dev;
	}

	dev_put(net_dev);

	/* Couldn't find a unique device with L2 parameters only. Use L3
	 * address to uniquely match the net device */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
						gid, addr, &net_dev);
	switch (matches) {
	case 0:
		return NULL;
	case 1:
		return net_dev;
	default:
		dev_warn_ratelimited(&dev->dev,
				     "duplicate IP address detected\n");
		return net_dev;
	}
}
int ipoib_set_mode(struct net_device *dev, const char *buf)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if ((test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
	     !strcmp(buf, "connected\n")) ||
	    (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
	     !strcmp(buf, "datagram\n"))) {
		return 0;
	}

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");
		netdev_update_features(dev);
		dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
		rtnl_unlock();
		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

		ipoib_flush_paths(dev);
		return (!rtnl_trylock()) ? -EBUSY : 0;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		netdev_update_features(dev);
		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
		rtnl_unlock();
		ipoib_flush_paths(dev);
		return (!rtnl_trylock()) ? -EBUSY : 0;
	}

	return -EINVAL;
}
struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}
static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}
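
/*
 * Paths are tracked twice: in priv->path_tree, an rb-tree keyed by the
 * 16-byte destination GID (compared with memcmp above) for O(log n)
 * lookup in __path_find(), and on priv->path_list so that
 * ipoib_mark_paths_invalid() and ipoib_flush_paths() can iterate them.
 */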
static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	ipoib_dbg(ipoib_priv(dev), "path_free\n");

	/* remove all neigh connected to this path */
	ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
void ipoib_mark_paths_invalid(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%08x GID %pI6 invalid\n",
			  be32_to_cpu(sa_path_get_dlid(&path->pathrec)),
			  path->pathrec.dgid.raw);
		if (path->ah)
			path->ah->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}
static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
{
	struct ipoib_pseudo_header *phdr;

	phdr = skb_push(skb, sizeof(*phdr));
	memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
}
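
/*
 * The pseudo header carries the 20-byte destination hardware address
 * in front of the packet data. ipoib_hard_header() cannot resolve a
 * path itself (that may require a sleeping SA query), so the address
 * travels with the skb and is consumed again in ipoib_start_xmit().
 */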
void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		wait_for_completion(&path->done);
		path_free(dev, path);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}
static void path_rec_completion(int status,
				struct sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
			  be32_to_cpu(sa_path_get_dlid(pathrec)),
			  pathrec->dgid.raw);
	else
		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
			  status, path->pathrec.dgid.raw);

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct rdma_ah_attr av;

		if (!ib_init_ah_attr_from_path(priv->ca, priv->port,
					       pathrec, &av))
			ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (!IS_ERR_OR_NULL(ah)) {
		/*
		 * pathrec.dgid is used as the database key from the LLADDR,
		 * it must remain unchanged even if the SA returns a different
		 * GID to use in the AH.
		 */
		if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
			   sizeof(union ib_gid))) {
			ipoib_dbg(
				priv,
				"%s got PathRec for gid %pI6 while asked for %pI6\n",
				dev->name, pathrec->dgid.raw,
				path->pathrec.dgid.raw);
			memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
			       sizeof(union ib_gid));
		}

		path->pathrec = *pathrec;

		old_ah   = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be32_to_cpu(sa_path_get_dlid(pathrec)),
			  pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			if (neigh->ah) {
				WARN_ON(neigh->ah != old_ah);
				/*
				 * Dropping the ah reference inside
				 * priv->lock is safe here, because we
				 * will hold one more reference from
				 * the original value of path->ah (ie
				 * old_ah).
				 */
				ipoib_put_ah(neigh->ah);
			}
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;

			if (ipoib_cm_enabled(dev, neigh->daddr)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					ipoib_neigh_free(neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
		path->ah->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (IS_ERR_OR_NULL(ah))
		ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (old_ah)
		ipoib_put_ah(old_ah);

	while ((skb = __skb_dequeue(&skqueue))) {
		int ret;

		skb->dev = dev;
		ret = dev_queue_xmit(skb);
		if (ret)
			ipoib_warn(priv, "%s: dev_queue_xmit failed to re-queue packet, ret:%d\n",
				   __func__, ret);
	}
}
static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path,
			  void *gid)
{
	path->dev = priv->dev;

	if (rdma_cap_opa_ah(priv->ca, priv->port))
		path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
	else
		path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;

	memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid));
	path->pathrec.sgid	    = priv->local_gid;
	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
}
static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	init_path_rec(priv, path, gid);

	return path;
}
static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
		  path->pathrec.dgid.raw);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}
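
/*
 * Path resolution is asynchronous: ib_sa_path_rec_get() sends a path
 * record query to the subnet administrator and returns immediately;
 * path_rec_completion() later builds the address handle and drains any
 * skbs that were queued on the path meanwhile. The path->done
 * completion lets ipoib_flush_paths() wait out in-flight queries.
 */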
static void neigh_refresh_path(struct ipoib_neigh *neigh, u8 *daddr,
			       struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, daddr + 4);
	if (!path)
		goto out;
	if (!path->query)
		path_rec_start(dev, path);
out:
	spin_unlock_irqrestore(&priv->lock, flags);
}
static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
					  struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	neigh = ipoib_neigh_alloc(daddr, dev);
	if (!neigh) {
		spin_unlock_irqrestore(&priv->lock, flags);
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return NULL;
	}

	/* To avoid race condition, make sure that the
	 * neigh will be added only once.
	 */
	if (unlikely(!list_empty(&neigh->list))) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return neigh;
	}

	path = __path_find(dev, daddr + 4);
	if (!path) {
		path = path_rec_create(dev, daddr + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah && path->ah->valid) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;

		if (ipoib_cm_enabled(dev, neigh->daddr)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				ipoib_neigh_free(neigh);
				goto err_drop;
			}
			if (skb_queue_len(&neigh->queue) <
			    IPOIB_MAX_PATH_REC_QUEUE) {
				push_pseudo_header(skb, neigh->daddr);
				__skb_queue_tail(&neigh->queue, skb);
			} else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else {
			spin_unlock_irqrestore(&priv->lock, flags);
			path->ah->last_send = rn->send(dev, skb, path->ah->ah,
						       IPOIB_QPN(daddr));
			ipoib_neigh_put(neigh);
			return NULL;
		}
	} else {
		neigh->ah = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_path;
		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			push_pseudo_header(skb, neigh->daddr);
			__skb_queue_tail(&neigh->queue, skb);
		} else {
			goto err_drop;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
	return NULL;

err_path:
	ipoib_neigh_free(neigh);
err_drop:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);

	return NULL;
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudo_header *phdr)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* no broadcast means that all paths are (going to be) not valid */
	if (!priv->broadcast)
		goto drop_and_unlock;

	path = __path_find(dev, phdr->hwaddr + 4);
	if (!path || !path->ah || !path->ah->valid) {
		if (!path) {
			path = path_rec_create(dev, phdr->hwaddr + 4);
			if (!path)
				goto drop_and_unlock;
			__path_add(dev, path);
		} else {
			/*
			 * make sure there are no changes in the existing
			 * path record
			 */
			init_path_rec(priv, path, phdr->hwaddr + 4);
		}
		if (!path->query && path_rec_start(dev, path)) {
			goto drop_and_unlock;
		}

		if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			push_pseudo_header(skb, phdr->hwaddr);
			__skb_queue_tail(&path->queue, skb);
			goto unlock;
		} else {
			goto drop_and_unlock;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_dbg(priv, "Send unicast ARP to %08x\n",
		  be32_to_cpu(sa_path_get_dlid(&path->pathrec)));
	path->ah->last_send = rn->send(dev, skb, path->ah->ah,
				       IPOIB_QPN(phdr->hwaddr));
	return;

drop_and_unlock:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);
unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}
static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	struct ipoib_pseudo_header *phdr;
	struct ipoib_header *header;
	unsigned long flags;

	phdr = (struct ipoib_pseudo_header *) skb->data;
	skb_pull(skb, sizeof(*phdr));
	header = (struct ipoib_header *) skb->data;

	if (unlikely(phdr->hwaddr[4] == 0xff)) {
		/* multicast, arrange "if" according to probability */
		if ((header->proto != htons(ETH_P_IP)) &&
		    (header->proto != htons(ETH_P_IPV6)) &&
		    (header->proto != htons(ETH_P_ARP)) &&
		    (header->proto != htons(ETH_P_RARP)) &&
		    (header->proto != htons(ETH_P_TIPC))) {
			/* ethertype not supported by IPoIB */
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		/* Add in the P_Key for multicast*/
		phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
		phdr->hwaddr[9] = priv->pkey & 0xff;

		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
		if (likely(neigh))
			goto send_using_neigh;
		ipoib_mcast_send(dev, phdr->hwaddr, skb);
		return NETDEV_TX_OK;
	}

	/* unicast, arrange "switch" according to probability */
	switch (header->proto) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_TIPC):
		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
		if (unlikely(!neigh)) {
			neigh = neigh_add_path(skb, phdr->hwaddr, dev);
			if (likely(!neigh))
				return NETDEV_TX_OK;
		}
		break;
	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		/* for unicast ARP and RARP should always perform path find */
		unicast_arp_send(skb, dev, phdr);
		return NETDEV_TX_OK;
	default:
		/* ethertype not supported by IPoIB */
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

send_using_neigh:
	/* note we now hold a ref to neigh */
	if (ipoib_cm_get(neigh)) {
		if (ipoib_cm_up(neigh)) {
			ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
			goto unref;
		}
	} else if (neigh->ah && neigh->ah->valid) {
		neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah,
						IPOIB_QPN(phdr->hwaddr));
		goto unref;
	} else if (neigh->ah) {
		neigh_refresh_path(neigh, phdr->hwaddr, dev);
	}

	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		push_pseudo_header(skb, phdr->hwaddr);
		spin_lock_irqsave(&priv->lock, flags);
		__skb_queue_tail(&neigh->queue, skb);
		spin_unlock_irqrestore(&priv->lock, flags);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

unref:
	ipoib_neigh_put(neigh);

	return NETDEV_TX_OK;
}
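
/*
 * The demux above relies on the IPoIB hardware address layout (RFC
 * 4391): byte 0 holds flags (e.g. connected-mode support), bytes 1-3
 * the remote QPN, and bytes 4-19 the GID. A GID starting with 0xff is
 * a multicast MGID, hence the phdr->hwaddr[4] == 0xff test.
 */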
static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev_trans_start(dev)));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}
static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr, const void *saddr, unsigned len)
{
	struct ipoib_header *header;

	header = skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * we don't rely on dst_entry structure, always stuff the
	 * destination address into skb hard header so we can figure out where
	 * to send the packet later.
	 */
	push_pseudo_header(skb, daddr);

	return IPOIB_HARD_LEN;
}
static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(priv->wq, &priv->restart_task);
}
static int ipoib_get_iflink(const struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	/* parent interface */
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
		return dev->ifindex;

	/* child/vlan interface */
	return priv->parent->ifindex;
}
static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
{
	/*
	 * Use only the address parts that contribute to spreading.
	 * The subnet prefix is not used, as one cannot connect to the
	 * same remote port (GUID) using the same remote QPN via two
	 * different subnets.
	 */
	/* qpn octets[1:4) & port GUID octets[12:20) */
	u32 *d32 = (u32 *) daddr;
	u32 hv;

	hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
	return hv & htbl->mask;
}
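
/*
 * Sketch of the hash input, viewing the 20-byte address as five u32
 * words (an illustration, not additional code):
 *
 *	d32[0]          flags byte + QPN (IPOIB_QPN_MASK strips the flags)
 *	d32[1], d32[2]  subnet prefix, deliberately ignored
 *	d32[3], d32[4]  low 8 bytes of the GID, i.e. the port GUID
 *
 * jhash_3words() mixes the QPN and GUID words; masking with htbl->mask
 * works because the table size is a power of two.
 */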
struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh = NULL;
	u32 hash_val;

	rcu_read_lock_bh();

	htbl = rcu_dereference_bh(ntbl->htbl);

	if (!htbl)
		goto out_unlock;

	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
	     neigh != NULL;
	     neigh = rcu_dereference_bh(neigh->hnext)) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				goto out_unlock;
			}

			if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
				neigh->alive = jiffies;
			goto out_unlock;
		}
	}

out_unlock:
	rcu_read_unlock_bh();
	return neigh;
}
static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long neigh_obsolete;
	unsigned long dt;
	unsigned long flags;
	int i;
	LIST_HEAD(remove_list);

	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		return;

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	/* neigh is obsolete if it was idle for two GC periods */
	dt = 2 * arp_tbl.gc_interval;
	neigh_obsolete = jiffies - dt;
	/* handle possible race condition */
	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* was the neigh idle for two GC periods */
			if (time_after(neigh_obsolete, neigh->alive)) {

				ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list);

				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from path/mc list */
				list_del_init(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_mcast_remove_list(&remove_list);
}
static void ipoib_reap_neigh(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);

	__ipoib_reap_neigh(priv);

	if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		queue_delayed_work(priv->wq, &priv->neigh_reap_task,
				   arp_tbl.gc_interval);
}
static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
					    struct net_device *dev)
{
	struct ipoib_neigh *neigh;

	neigh = kzalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->dev = dev;
	memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
	skb_queue_head_init(&neigh->queue);
	INIT_LIST_HEAD(&neigh->list);
	ipoib_cm_set(neigh, NULL);
	/* one ref on behalf of the caller */
	atomic_set(&neigh->refcnt, 1);

	return neigh;
}
struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
				      struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl) {
		neigh = NULL;
		goto out_unlock;
	}

	/* need to add a new neigh, but maybe some other thread succeeded?
	 * recalc hash, maybe hash resize took place so we do a search
	 */
	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
					       lockdep_is_held(&priv->lock));
	     neigh != NULL;
	     neigh = rcu_dereference_protected(neigh->hnext,
					       lockdep_is_held(&priv->lock))) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				break;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

	neigh = ipoib_neigh_ctor(daddr, dev);
	if (!neigh)
		goto out_unlock;

	/* one ref on behalf of the hash table */
	atomic_inc(&neigh->refcnt);
	neigh->alive = jiffies;
	/* put in hash */
	rcu_assign_pointer(neigh->hnext,
			   rcu_dereference_protected(htbl->buckets[hash_val],
						     lockdep_is_held(&priv->lock)));
	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
	atomic_inc(&ntbl->entries);

out_unlock:

	return neigh;
}
void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
{
	/* neigh reference count was dropped to zero */
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct sk_buff *skb;

	if (neigh->ah)
		ipoib_put_ah(neigh->ah);
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	ipoib_dbg(ipoib_priv(dev),
		  "neigh free for %06x %pI6\n",
		  IPOIB_QPN(neigh->daddr),
		  neigh->daddr + 4);
	kfree(neigh);
	if (atomic_dec_and_test(&priv->ntbl.entries)) {
		if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
			complete(&priv->ntbl.flushed);
	}
}
static void ipoib_neigh_reclaim(struct rcu_head *rp)
{
	/* Called as a result of removal from hash table */
	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
	/* note TX context may hold another ref */
	ipoib_neigh_put(neigh);
}
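
/*
 * Neigh lifetime in short: the hash table holds one reference and each
 * lookup takes another via atomic_inc_not_zero(). Removal unlinks the
 * entry under priv->lock and then drops the table's reference from RCU
 * context (ipoib_neigh_reclaim), after all lock-free readers that
 * might still see the entry have finished.
 */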
void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **np;
	struct ipoib_neigh *n;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		return;

	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
	np = &htbl->buckets[hash_val];
	for (n = rcu_dereference_protected(*np,
					   lockdep_is_held(&priv->lock));
	     n != NULL;
	     n = rcu_dereference_protected(*np,
					   lockdep_is_held(&priv->lock))) {
		if (n == neigh) {
			/* found */
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from parent list */
			list_del_init(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			return;
		} else {
			np = &n->hnext;
		}
	}
}
static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **buckets;
	u32 size;

	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
	ntbl->htbl = NULL;
	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return -ENOMEM;
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	size = roundup_pow_of_two(arp_tbl.gc_thresh3);
	buckets = kzalloc(size * sizeof(*buckets), GFP_KERNEL);
	if (!buckets) {
		kfree(htbl);
		return -ENOMEM;
	}
	htbl->size = size;
	htbl->mask = (size - 1);
	htbl->buckets = buckets;
	RCU_INIT_POINTER(ntbl->htbl, htbl);
	htbl->ntbl = ntbl;
	atomic_set(&ntbl->entries, 0);

	/* start garbage collection */
	clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
			   arp_tbl.gc_interval);

	return 0;
}
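
/*
 * Sizing note: the bucket count borrows gc_thresh3 (the hard upper
 * bound of the kernel ARP cache) rounded up to a power of two, so that
 * htbl->mask = size - 1 works in ipoib_addr_hash(). The GC period
 * likewise reuses arp_tbl.gc_interval, keeping IPoIB neighbour aging
 * roughly in step with ARP.
 */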
static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct ipoib_neigh_hash *htbl = container_of(head,
						     struct ipoib_neigh_hash,
						     rcu);
	struct ipoib_neigh __rcu **buckets = htbl->buckets;
	struct ipoib_neigh_table *ntbl = htbl->ntbl;

	kfree(buckets);
	kfree(htbl);
	complete(&ntbl->deleted);
}
void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i;

	/* remove all neigh connected to a given path or mcast */
	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* delete neighs belonging to this parent */
			if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from parent list */
				list_del_init(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}
static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i, wait_flushed = 0;

	init_completion(&priv->ntbl.flushed);
	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		goto out_unlock;

	wait_flushed = atomic_read(&priv->ntbl.entries);
	if (!wait_flushed)
		goto free_htbl;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from path/mc list */
			list_del_init(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
		}
	}

free_htbl:
	rcu_assign_pointer(ntbl->htbl, NULL);
	call_rcu(&htbl->rcu, neigh_hash_free_rcu);

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	if (wait_flushed)
		wait_for_completion(&priv->ntbl.flushed);
}
static void ipoib_neigh_hash_uninit(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int stopped;

	ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
	init_completion(&priv->ntbl.deleted);

	/* Stop GC; if called at init failure we need to cancel the work */
	stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	if (!stopped)
		cancel_delayed_work(&priv->neigh_reap_task);

	ipoib_flush_neighs(priv);

	wait_for_completion(&priv->ntbl.deleted);
}
static void ipoib_napi_add(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	netif_napi_add(dev, &priv->recv_napi, ipoib_rx_poll, IPOIB_NUM_WC);
	netif_napi_add(dev, &priv->send_napi, ipoib_tx_poll, MAX_SEND_CQE);
}

static void ipoib_napi_del(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	netif_napi_del(&priv->recv_napi);
	netif_napi_del(&priv->send_napi);
}
static void ipoib_dev_uninit_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_transport_dev_cleanup(dev);

	ipoib_napi_del(dev);

	ipoib_cm_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}
static int ipoib_dev_init_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_napi_add(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring)
		goto out;

	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
	if (!priv->tx_ring) {
		pr_warn("%s: failed to allocate TX ring (%d entries)\n",
			priv->ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_transport_dev_init(dev, priv->ca)) {
		pr_warn("%s: ipoib_transport_dev_init failed\n",
			priv->ca->name);
		goto out_tx_ring_cleanup;
	}

	/* after qp created set dev address */
	priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff;
	priv->dev->dev_addr[2] = (priv->qp->qp_num >>  8) & 0xff;
	priv->dev->dev_addr[3] = (priv->qp->qp_num) & 0xff;

	return 0;

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	ipoib_napi_del(dev);
	return -ENOMEM;
}
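
/*
 * At this point the device address is nearly complete: byte 0 was
 * zeroed at allocation, bytes 1-3 now carry the local UD QPN set
 * above, and bytes 4-19 receive the port GID in ipoib_add_port() (see
 * also set_base_guid()).
 */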
static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
		       int cmd)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (!priv->rn_ops->ndo_do_ioctl)
		return -EOPNOTSUPP;

	return priv->rn_ops->ndo_do_ioctl(dev, ifr, cmd);
}
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret = -ENOMEM;

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	/*
	 * the various IPoIB tasks assume they will never race against
	 * themselves, so always use a single thread workqueue
	 */
	priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM);
	if (!priv->wq) {
		pr_warn("%s: failed to allocate device WQ\n", dev->name);
		goto out;
	}

	/* create pd, which used both for control and datapath*/
	priv->pd = ib_alloc_pd(priv->ca, 0);
	if (IS_ERR(priv->pd)) {
		pr_warn("%s: failed to allocate PD\n", ca->name);
		goto clean_wq;
	}

	ret = priv->rn_ops->ndo_init(dev);
	if (ret) {
		pr_warn("%s failed to init HW resource\n", dev->name);
		goto out_free_pd;
	}

	if (ipoib_neigh_hash_init(priv) < 0) {
		pr_warn("%s failed to init neigh hash\n", dev->name);
		goto out_dev_uninit;
	}

	if (dev->flags & IFF_UP) {
		if (ipoib_ib_dev_open(dev)) {
			pr_warn("%s failed to open device\n", dev->name);
			ret = -ENODEV;
			goto out_dev_uninit;
		}
	}

	return 0;

out_dev_uninit:
	ipoib_ib_dev_cleanup(dev);

out_free_pd:
	if (priv->pd) {
		ib_dealloc_pd(priv->pd);
		priv->pd = NULL;
	}

clean_wq:
	if (priv->wq) {
		destroy_workqueue(priv->wq);
		priv->wq = NULL;
	}

out:
	return ret;
}
void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev), *cpriv, *tcpriv;
	LIST_HEAD(head);

	ASSERT_RTNL();

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		/* Stop GC on child */
		set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
		cancel_delayed_work(&cpriv->neigh_reap_task);
		unregister_netdevice_queue(cpriv->dev, &head);
	}
	unregister_netdevice_many(&head);

	ipoib_neigh_hash_uninit(dev);

	ipoib_ib_dev_cleanup(dev);

	/* no more works over the priv->wq */
	if (priv->wq) {
		flush_workqueue(priv->wq);
		destroy_workqueue(priv->wq);
		priv->wq = NULL;
	}
}
static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
}

static int ipoib_get_vf_config(struct net_device *dev, int vf,
			       struct ifla_vf_info *ivf)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int err;

	err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
	if (err)
		return err;

	ivf->vf = vf;

	return 0;
}

static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
		return -EINVAL;

	return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
}

static int ipoib_get_vf_stats(struct net_device *dev, int vf,
			      struct ifla_vf_stats *vf_stats)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
}
static const struct header_ops ipoib_header_ops = {
	.create	= ipoib_hard_header,
};
static const struct net_device_ops ipoib_netdev_ops_pf = {
	.ndo_uninit		 = ipoib_uninit,
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
	.ndo_get_iflink		 = ipoib_get_iflink,
	.ndo_set_vf_link_state	 = ipoib_set_vf_link_state,
	.ndo_get_vf_config	 = ipoib_get_vf_config,
	.ndo_get_vf_stats	 = ipoib_get_vf_stats,
	.ndo_set_vf_guid	 = ipoib_set_vf_guid,
	.ndo_set_mac_address	 = ipoib_set_mac,
	.ndo_get_stats64	 = ipoib_get_stats,
	.ndo_do_ioctl		 = ipoib_ioctl,
};
static const struct net_device_ops ipoib_netdev_ops_vf = {
	.ndo_uninit		 = ipoib_uninit,
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
	.ndo_get_iflink		 = ipoib_get_iflink,
	.ndo_get_stats64	 = ipoib_get_stats,
	.ndo_do_ioctl		 = ipoib_ioctl,
};
void ipoib_setup_common(struct net_device *dev)
{
	dev->header_ops		 = &ipoib_header_ops;

	ipoib_set_ethtool_ops(dev);

	dev->watchdog_timeo	 = HZ;

	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;

	dev->hard_header_len	 = IPOIB_HARD_LEN;
	dev->addr_len		 = INFINIBAND_ALEN;
	dev->type		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len	 = ipoib_sendq_size * 2;
	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
				    NETIF_F_HIGHDMA);
	netif_keep_dst(dev);

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
}
static void ipoib_build_priv(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	priv->dev = dev;
	spin_lock_init(&priv->lock);
	init_rwsem(&priv->vlan_rwsem);
	mutex_init(&priv->mcast_mutex);
	mutex_init(&priv->sysfs_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light,   ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal,   ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy,   ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}
static const struct net_device_ops ipoib_netdev_default_pf = {
	.ndo_init		 = ipoib_dev_init_default,
	.ndo_uninit		 = ipoib_dev_uninit_default,
	.ndo_open		 = ipoib_ib_dev_open_default,
	.ndo_stop		 = ipoib_ib_dev_stop_default,
};
static struct net_device
*ipoib_create_netdev_default(struct ib_device *hca,
			     const char *name,
			     unsigned char name_assign_type,
			     void (*setup)(struct net_device *))
{
	struct net_device *dev;
	struct rdma_netdev *rn;

	dev = alloc_netdev((int)sizeof(struct rdma_netdev),
			   name,
			   name_assign_type, setup);
	if (!dev)
		return NULL;

	rn = netdev_priv(dev);

	rn->send = ipoib_send;
	rn->attach_mcast = ipoib_mcast_attach;
	rn->detach_mcast = ipoib_mcast_detach;
	rn->free_rdma_netdev = free_netdev;
	rn->hca = hca;

	dev->netdev_ops = &ipoib_netdev_default_pf;

	return dev;
}
static struct net_device *ipoib_get_netdev(struct ib_device *hca, u8 port,
					   const char *name)
{
	struct net_device *dev;

	if (hca->alloc_rdma_netdev) {
		dev = hca->alloc_rdma_netdev(hca, port,
					     RDMA_NETDEV_IPOIB, name,
					     NET_NAME_UNKNOWN,
					     ipoib_setup_common);
		if (IS_ERR_OR_NULL(dev) && PTR_ERR(dev) != -EOPNOTSUPP)
			return NULL;
	}

	if (!hca->alloc_rdma_netdev || PTR_ERR(dev) == -EOPNOTSUPP)
		dev = ipoib_create_netdev_default(hca, name, NET_NAME_UNKNOWN,
						  ipoib_setup_common);

	return dev;
}
struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port,
					const char *name)
{
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	struct rdma_netdev *rn;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return NULL;

	dev = ipoib_get_netdev(hca, port, name);
	if (!dev)
		goto free_priv;

	priv->rn_ops = dev->netdev_ops;

	/* fixme : should be after the query_cap */
	if (priv->hca_caps & IB_DEVICE_VIRTUAL_FUNCTION)
		dev->netdev_ops	= &ipoib_netdev_ops_vf;
	else
		dev->netdev_ops	= &ipoib_netdev_ops_pf;

	rn = netdev_priv(dev);
	rn->clnt_priv = priv;
	ipoib_build_priv(dev);

	return priv;

free_priv:
	kfree(priv);
	return NULL;
}
static ssize_t show_pkey(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static ssize_t show_umcast(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);

	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}
void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
{
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly "
				"by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
}
static ssize_t set_umcast(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	ipoib_set_umcast(to_net_dev(dev), umcast_val);

	return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);
int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}
static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
{
	struct ipoib_dev_priv *child_priv;
	struct net_device *netdev = priv->dev;

	netif_addr_lock_bh(netdev);

	memcpy(&priv->local_gid.global.interface_id,
	       &gid->global.interface_id,
	       sizeof(gid->global.interface_id));
	memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
	clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);

	netif_addr_unlock_bh(netdev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(child_priv, &priv->child_intfs, list)
			set_base_guid(child_priv, gid);
		up_read(&priv->vlan_rwsem);
	}
}
static int ipoib_check_lladdr(struct net_device *dev,
			      struct sockaddr_storage *ss)
{
	union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
	int ret = 0;

	netif_addr_lock_bh(dev);

	/* Make sure the QPN, reserved and subnet prefix match the current
	 * lladdr, it also makes sure the lladdr is unicast.
	 */
	if (memcmp(dev->dev_addr, ss->__data,
		   4 + sizeof(gid->global.subnet_prefix)) ||
	    gid->global.interface_id == 0)
		ret = -EINVAL;

	netif_addr_unlock_bh(dev);

	return ret;
}
static int ipoib_set_mac(struct net_device *dev, void *addr)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct sockaddr_storage *ss = addr;
	int ret;

	if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
		return -EBUSY;

	ret = ipoib_check_lladdr(dev, ss);
	if (ret)
		return ret;

	set_base_guid(priv, (union ib_gid *)(ss->__data + 4));

	queue_work(ipoib_workqueue, &priv->flush_light);

	return 0;
}
static ssize_t create_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);
static ssize_t delete_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);
int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}
void ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	priv->hca_caps = hca->attrs.device_cap_flags;

	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (priv->hca_caps & IB_DEVICE_UD_TSO)
			priv->dev->hw_features |= NETIF_F_TSO;

		priv->dev->features |= priv->dev->hw_features;
	}
}
static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	struct rdma_netdev *rn;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(hca, port, format);
	if (!priv) {
		pr_warn("%s, %d: ipoib_intf_alloc failed\n", hca->name, port);
		goto alloc_mem_failed;
	}

	SET_NETDEV_DEV(priv->dev, hca->dev.parent);
	priv->dev->dev_id = port - 1;

	result = ib_query_port(hca, port, &attr);
	if (result) {
		pr_warn("%s: ib_query_port %d failed\n", hca->name, port);
		goto device_init_failed;
	}

	priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);

	/* MTU will be reset when mcast join happens */
	priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
	priv->dev->max_mtu = IPOIB_CM_MTU;

	priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
			hca->name, port, result);
		goto device_init_failed;
	}

	ipoib_set_dev_features(priv, hca);

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL);
	if (result) {
		pr_warn("%s: ib_query_gid port %d failed (ret = %d)\n",
			hca->name, port, result);
		goto device_init_failed;
	}

	memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
	       sizeof(union ib_gid));
	set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result) {
		pr_warn("%s: failed to initialize port %d (ret = %d)\n",
			hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	ib_register_event_handler(&priv->event_handler);

	/* call event handler to ensure pkey in sync */
	queue_work(ipoib_workqueue, &priv->flush_heavy);

	result = register_netdev(priv->dev);
	if (result) {
		pr_warn("%s: couldn't register ipoib port %d; error %d\n",
			hca->name, port, result);
		goto register_failed;
	}

	result = -ENOMEM;
	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(priv->dev))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	flush_workqueue(ipoib_workqueue);
	/* Stop GC if started before flush */
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	cancel_delayed_work(&priv->neigh_reap_task);
	flush_workqueue(priv->wq);
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	rn = netdev_priv(priv->dev);
	rn->free_rdma_netdev(priv->dev);
	kfree(priv);

alloc_mem_failed:
	return ERR_PTR(result);
}
static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int p;
	int count = 0;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		if (!rdma_protocol_ib(device, p))
			continue;
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = ipoib_priv(dev);
			list_add_tail(&priv->list, dev_list);
			count++;
		}
	}

	if (!count) {
		kfree(dev_list);
		return;
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}
static void ipoib_remove_one(struct ib_device *device, void *client_data)
{
	struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
	struct list_head *dev_list = client_data;

	if (!dev_list)
		return;

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		struct rdma_netdev *parent_rn = netdev_priv(priv->dev);

		ib_unregister_event_handler(&priv->event_handler);
		flush_workqueue(ipoib_workqueue);

		/* mark interface in the middle of destruction */
		set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);

		rtnl_lock();
		dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
		rtnl_unlock();

		/* Stop GC */
		set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
		cancel_delayed_work(&priv->neigh_reap_task);
		flush_workqueue(priv->wq);

		/* Wrap rtnl_lock/unlock with mutex to protect sysfs calls */
		mutex_lock(&priv->sysfs_mutex);
		unregister_netdev(priv->dev);
		mutex_unlock(&priv->sysfs_mutex);

		parent_rn->free_rdma_netdev(priv->dev);

		list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
			struct rdma_netdev *child_rn;

			child_rn = netdev_priv(cpriv->dev);
			child_rn->free_rdma_netdev(cpriv->dev);
			kfree(cpriv);
		}

		kfree(priv);
	}

	kfree(dev_list);
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static struct notifier_block ipoib_netdev_notifier = {
	.notifier_call = ipoib_netdev_event,
};
#endif
static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
	ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create a global workqueue here that is used for all flush
	 * operations.  However, if you attempt to flush a workqueue
	 * from a task on that same workqueue, it deadlocks the system.
	 * We want to be able to flush the tasks associated with a
	 * specific net device, so we also create a workqueue for each
	 * netdevice.  We queue up the tasks for that device only on
	 * its private workqueue, and we only queue up flush events
	 * on our global flush workqueue.  This avoids the deadlocks.
	 */
	ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush",
						  WQ_MEM_RECLAIM);
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	ret = ipoib_netlink_init();
	if (ret)
		goto err_client;

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
	register_netdevice_notifier(&ipoib_netdev_notifier);
#endif
	return 0;

err_client:
	ib_unregister_client(&ipoib_client);

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}
static void __exit ipoib_cleanup_module(void)
{
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
	unregister_netdevice_notifier(&ipoib_netdev_notifier);
#endif
	ipoib_netlink_fini();
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}
module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);
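
/*
 * Example usage (hypothetical addresses; sysfs attribute names as
 * created above and in ipoib_cm_add_mode_attr()):
 *
 *	modprobe ib_ipoib recv_queue_size=256
 *	ip addr add 192.168.10.1/24 dev ib0
 *	ip link set ib0 up
 *	echo connected > /sys/class/net/ib0/mode
 *
 * Writing "connected" or "datagram" to the mode attribute ends up in
 * ipoib_set_mode(); create_child/delete_child take a P_Key, e.g.
 * "echo 0x8001 > .../create_child" to create ib0.8001.
 */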