1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
3 * Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
4 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
5 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
6 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
7 * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
10 #include <linux/completion.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/device.h>
13 #include <linux/module.h>
14 #include <linux/err.h>
15 #include <linux/idr.h>
16 #include <linux/interrupt.h>
17 #include <linux/random.h>
18 #include <linux/rbtree.h>
19 #include <linux/spinlock.h>
20 #include <linux/slab.h>
21 #include <linux/sysfs.h>
22 #include <linux/workqueue.h>
23 #include <linux/kdev_t.h>
24 #include <linux/etherdevice.h>
26 #include <rdma/ib_cache.h>
27 #include <rdma/ib_cm.h>
28 #include <rdma/ib_sysfs.h>
30 #include "core_priv.h"
33 MODULE_AUTHOR("Sean Hefty");
34 MODULE_DESCRIPTION("InfiniBand CM");
35 MODULE_LICENSE("Dual BSD/GPL");
37 #define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */
38 static const char * const ibcm_rej_reason_strs[] = {
39 [IB_CM_REJ_NO_QP] = "no QP",
40 [IB_CM_REJ_NO_EEC] = "no EEC",
41 [IB_CM_REJ_NO_RESOURCES] = "no resources",
42 [IB_CM_REJ_TIMEOUT] = "timeout",
43 [IB_CM_REJ_UNSUPPORTED] = "unsupported",
44 [IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID",
45 [IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance",
46 [IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID",
47 [IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type",
48 [IB_CM_REJ_STALE_CONN] = "stale conn",
49 [IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist",
50 [IB_CM_REJ_INVALID_GID] = "invalid GID",
51 [IB_CM_REJ_INVALID_LID] = "invalid LID",
52 [IB_CM_REJ_INVALID_SL] = "invalid SL",
53 [IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class",
54 [IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit",
55 [IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate",
56 [IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID",
57 [IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID",
58 [IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL",
59 [IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class",
60 [IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit",
61 [IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate",
62 [IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect",
63 [IB_CM_REJ_PORT_REDIRECT] = "port redirect",
64 [IB_CM_REJ_INVALID_MTU] = "invalid MTU",
65 [IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources",
66 [IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined",
67 [IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry",
68 [IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID",
69 [IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version",
70 [IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label",
71 [IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label",
72 [IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] =
73 "vendor option is not supported",
76 const char *__attribute_const__ ibcm_reject_msg(int reason)
78 size_t index = reason;
80 if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
81 ibcm_rej_reason_strs[index])
82 return ibcm_rej_reason_strs[index];
84 return "unrecognized reason";
86 EXPORT_SYMBOL(ibcm_reject_msg);
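/*
 * Illustrative sketch (hypothetical handler, not part of this file): a ULP's
 * CM event handler can use ibcm_reject_msg() to log a received REJ in
 * readable form.
 *
 *	static int my_cm_handler(struct ib_cm_id *id,
 *				 const struct ib_cm_event *event)
 *	{
 *		if (event->event == IB_CM_REJ_RECEIVED)
 *			pr_debug("REJ received: %s\n",
 *				 ibcm_reject_msg(event->param.rej_rcvd.reason));
 *		return 0;
 *	}
 */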
90 static int cm_add_one(struct ib_device *device);
91 static void cm_remove_one(struct ib_device *device, void *client_data);
92 static void cm_process_work(struct cm_id_private *cm_id_priv,
93 struct cm_work *work);
94 static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
95 struct ib_cm_sidr_rep_param *param);
96 static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
97 const void *private_data, u8 private_data_len);
98 static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
99 void *private_data, u8 private_data_len);
100 static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
101 enum ib_cm_rej_reason reason, void *ari,
102 u8 ari_length, const void *private_data,
103 u8 private_data_len);
105 static struct ib_client cm_client = {
108 .remove = cm_remove_one
111 static struct ib_cm {
113 struct list_head device_list;
114 rwlock_t device_lock;
115 struct rb_root listen_service_table;
116 u64 listen_service_id;
117 /* struct rb_root peer_service_table; todo: fix peer to peer */
118 struct rb_root remote_qp_table;
119 struct rb_root remote_id_table;
120 struct rb_root remote_sidr_table;
121 struct xarray local_id_table;
123 __be32 random_id_operand;
124 struct list_head timewait_list;
125 struct workqueue_struct *wq;
128 /* Counter indexes ordered by attribute ID */
142 CM_ATTR_ID_OFFSET = 0x0010,
153 struct cm_counter_attribute {
154 struct ib_port_attribute attr;
155 unsigned short group;
156 unsigned short index;
160 struct cm_device *cm_dev;
161 struct ib_mad_agent *mad_agent;
163 atomic_long_t counters[CM_COUNTER_GROUPS][CM_ATTR_COUNT];
168 struct list_head list;
169 spinlock_t mad_agent_lock;
170 struct ib_device *ib_device;
173 struct cm_port *port[];
177 struct cm_port *port;
178 struct rdma_ah_attr ah_attr;
185 struct delayed_work work;
186 struct list_head list;
187 struct cm_port *port;
188 struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
189 __be32 local_id; /* Established / timewait */
191 struct ib_cm_event cm_event;
192 struct sa_path_rec path[];
195 struct cm_timewait_info {
197 struct list_head list;
198 struct rb_node remote_qp_node;
199 struct rb_node remote_id_node;
200 __be64 remote_ca_guid;
202 u8 inserted_remote_qp;
203 u8 inserted_remote_id;
206 struct cm_id_private {
209 struct rb_node service_node;
210 struct rb_node sidr_id_node;
212 spinlock_t lock; /* Do not acquire inside cm.lock */
213 struct completion comp;
215 /* Number of clients sharing this ib_cm_id. Only valid for listeners.
216 * Protected by the cm.lock spinlock.
218 int listen_sharecount;
221 struct ib_mad_send_buf *msg;
222 struct cm_timewait_info *timewait_info;
223 /* todo: use alternate port on send failure */
231 enum ib_qp_type qp_type;
235 enum ib_mtu path_mtu;
239 u8 responder_resources;
246 struct list_head work_list;
249 struct rdma_ucm_ece ece;
252 static void cm_dev_release(struct kref *kref)
254 struct cm_device *cm_dev = container_of(kref, struct cm_device, kref);
257 rdma_for_each_port(cm_dev->ib_device, i)
258 kfree(cm_dev->port[i - 1]);
263 static void cm_device_put(struct cm_device *cm_dev)
265 kref_put(&cm_dev->kref, cm_dev_release);
268 static void cm_work_handler(struct work_struct *work);
270 static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
272 if (refcount_dec_and_test(&cm_id_priv->refcount))
273 complete(&cm_id_priv->comp);
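/*
 * cm_deref_id() pairs with the wait in cm_destroy_id(): dropping the last
 * reference completes cm_id_priv->comp, which cm_destroy_id() waits on
 * before tearing the ID down.
 */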
276 static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
278 struct ib_mad_agent *mad_agent;
279 struct ib_mad_send_buf *m;
282 lockdep_assert_held(&cm_id_priv->lock);
284 if (!cm_id_priv->av.port)
285 return ERR_PTR(-EINVAL);
287 spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
288 mad_agent = cm_id_priv->av.port->mad_agent;
290 m = ERR_PTR(-EINVAL);
294 ah = rdma_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr, 0);
300 m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
301 cm_id_priv->av.pkey_index,
302 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
304 IB_MGMT_BASE_VERSION);
306 rdma_destroy_ah(ah, 0);
310 /* Timeout set by caller if response is expected. */
312 m->retries = cm_id_priv->max_cm_retries;
314 refcount_inc(&cm_id_priv->refcount);
315 m->context[0] = cm_id_priv;
318 spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
322 static void cm_free_msg(struct ib_mad_send_buf *msg)
324 struct cm_id_private *cm_id_priv = msg->context[0];
327 rdma_destroy_ah(msg->ah, 0);
328 cm_deref_id(cm_id_priv);
329 ib_free_send_mad(msg);
332 static struct ib_mad_send_buf *
333 cm_alloc_priv_msg(struct cm_id_private *cm_id_priv)
335 struct ib_mad_send_buf *msg;
337 lockdep_assert_held(&cm_id_priv->lock);
339 msg = cm_alloc_msg(cm_id_priv);
342 cm_id_priv->msg = msg;
346 static void cm_free_priv_msg(struct ib_mad_send_buf *msg)
348 struct cm_id_private *cm_id_priv = msg->context[0];
350 lockdep_assert_held(&cm_id_priv->lock);
352 if (!WARN_ON(cm_id_priv->msg != msg))
353 cm_id_priv->msg = NULL;
356 rdma_destroy_ah(msg->ah, 0);
357 cm_deref_id(cm_id_priv);
358 ib_free_send_mad(msg);
361 static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
362 struct ib_mad_recv_wc *mad_recv_wc)
364 return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
365 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
367 IB_MGMT_BASE_VERSION);
370 static int cm_create_response_msg_ah(struct cm_port *port,
371 struct ib_mad_recv_wc *mad_recv_wc,
372 struct ib_mad_send_buf *msg)
376 ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
377 mad_recv_wc->recv_buf.grh, port->port_num);
385 static int cm_alloc_response_msg(struct cm_port *port,
386 struct ib_mad_recv_wc *mad_recv_wc,
387 struct ib_mad_send_buf **msg)
389 struct ib_mad_send_buf *m;
392 m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
396 ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
406 static void cm_free_response_msg(struct ib_mad_send_buf *msg)
409 rdma_destroy_ah(msg->ah, 0);
410 ib_free_send_mad(msg);
413 static void *cm_copy_private_data(const void *private_data, u8 private_data_len)
417 if (!private_data || !private_data_len)
420 data = kmemdup(private_data, private_data_len, GFP_KERNEL);
422 return ERR_PTR(-ENOMEM);
427 static void cm_set_private_data(struct cm_id_private *cm_id_priv,
428 void *private_data, u8 private_data_len)
430 if (cm_id_priv->private_data && cm_id_priv->private_data_len)
431 kfree(cm_id_priv->private_data);
433 cm_id_priv->private_data = private_data;
434 cm_id_priv->private_data_len = private_data_len;
437 static void cm_set_av_port(struct cm_av *av, struct cm_port *port)
439 struct cm_port *old_port = av->port;
441 if (old_port == port)
446 cm_device_put(old_port->cm_dev);
448 kref_get(&port->cm_dev->kref);
451 static void cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
452 struct rdma_ah_attr *ah_attr, struct cm_av *av)
454 cm_set_av_port(av, port);
455 av->pkey_index = wc->pkey_index;
456 rdma_move_ah_attr(&av->ah_attr, ah_attr);
459 static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
460 struct ib_grh *grh, struct cm_av *av)
462 cm_set_av_port(av, port);
463 av->pkey_index = wc->pkey_index;
464 return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
469 static struct cm_port *
470 get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
472 struct cm_device *cm_dev;
473 struct cm_port *port = NULL;
477 read_lock_irqsave(&cm.device_lock, flags);
478 list_for_each_entry(cm_dev, &cm.device_list, list) {
479 if (cm_dev->ib_device == attr->device) {
480 port = cm_dev->port[attr->port_num - 1];
484 read_unlock_irqrestore(&cm.device_lock, flags);
486 /* SGID attribute can be NULL in the following cases:
488 * (a) Alternative path
489 * (b) IB link layer without GRH
490 * (c) LAP send messages
492 read_lock_irqsave(&cm.device_lock, flags);
493 list_for_each_entry(cm_dev, &cm.device_list, list) {
494 attr = rdma_find_gid(cm_dev->ib_device,
496 sa_conv_pathrec_to_gid_type(path),
499 port = cm_dev->port[attr->port_num - 1];
503 read_unlock_irqrestore(&cm.device_lock, flags);
505 rdma_put_gid_attr(attr);
510 static int cm_init_av_by_path(struct sa_path_rec *path,
511 const struct ib_gid_attr *sgid_attr,
514 struct rdma_ah_attr new_ah_attr;
515 struct cm_device *cm_dev;
516 struct cm_port *port;
519 port = get_cm_port_from_path(path, sgid_attr);
522 cm_dev = port->cm_dev;
524 ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
525 be16_to_cpu(path->pkey), &av->pkey_index);
529 cm_set_av_port(av, port);
532 * av->ah_attr might have been initialized from a wc or during
533 * request processing, and in either case may hold a reference to
534 * sgid_attr. So initialize a new ah_attr on the stack.
535 * If initialization fails, the old ah_attr is still used for sending
536 * any responses. If initialization succeeds, the new ah_attr
537 * overwrites the old one, so the right ah_attr can be used to
538 * return an error response.
540 ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
541 &new_ah_attr, sgid_attr);
545 av->timeout = path->packet_life_time + 1;
546 rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
550 /* Move av created by cm_init_av_by_path(), so av.dgid is not moved */
551 static void cm_move_av_from_path(struct cm_av *dest, struct cm_av *src)
553 cm_set_av_port(dest, src->port);
554 cm_set_av_port(src, NULL);
555 dest->pkey_index = src->pkey_index;
556 rdma_move_ah_attr(&dest->ah_attr, &src->ah_attr);
557 dest->timeout = src->timeout;
560 static void cm_destroy_av(struct cm_av *av)
562 rdma_destroy_ah_attr(&av->ah_attr);
563 cm_set_av_port(av, NULL);
566 static u32 cm_local_id(__be32 local_id)
568 return (__force u32) (local_id ^ cm.random_id_operand);
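/*
 * cm_local_id() is the inverse of the assignment in cm_alloc_id_priv():
 * the externally visible local_id is the xarray index XOR'd with a random
 * operand, so the IDs placed on the wire are unpredictable while lookups
 * still index the xarray directly.
 */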
571 static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
573 struct cm_id_private *cm_id_priv;
576 cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
577 if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id ||
578 !refcount_inc_not_zero(&cm_id_priv->refcount))
586 * Trivial helpers to strip endian annotation and compare; the
587 * endianness doesn't actually matter since we just need a stable
588 * order for the RB tree.
590 static int be32_lt(__be32 a, __be32 b)
592 return (__force u32) a < (__force u32) b;
595 static int be32_gt(__be32 a, __be32 b)
597 return (__force u32) a > (__force u32) b;
600 static int be64_lt(__be64 a, __be64 b)
602 return (__force u64) a < (__force u64) b;
605 static int be64_gt(__be64 a, __be64 b)
607 return (__force u64) a > (__force u64) b;
611 * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv
612 * if the new ID was inserted, NULL if it could not be inserted due to a
613 * collision, or the existing cm_id_priv ready for shared usage.
615 static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
616 ib_cm_handler shared_handler)
618 struct rb_node **link = &cm.listen_service_table.rb_node;
619 struct rb_node *parent = NULL;
620 struct cm_id_private *cur_cm_id_priv;
621 __be64 service_id = cm_id_priv->id.service_id;
624 spin_lock_irqsave(&cm.lock, flags);
627 cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
630 if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
631 link = &(*link)->rb_left;
632 else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
633 link = &(*link)->rb_right;
634 else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
635 link = &(*link)->rb_left;
636 else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
637 link = &(*link)->rb_right;
640 * Sharing an ib_cm_id with different handlers is not supported.
643 if (cur_cm_id_priv->id.cm_handler != shared_handler ||
644 cur_cm_id_priv->id.context ||
645 WARN_ON(!cur_cm_id_priv->id.cm_handler)) {
646 spin_unlock_irqrestore(&cm.lock, flags);
649 refcount_inc(&cur_cm_id_priv->refcount);
650 cur_cm_id_priv->listen_sharecount++;
651 spin_unlock_irqrestore(&cm.lock, flags);
652 return cur_cm_id_priv;
655 cm_id_priv->listen_sharecount++;
656 rb_link_node(&cm_id_priv->service_node, parent, link);
657 rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
658 spin_unlock_irqrestore(&cm.lock, flags);
662 static struct cm_id_private *cm_find_listen(struct ib_device *device,
665 struct rb_node *node = cm.listen_service_table.rb_node;
666 struct cm_id_private *cm_id_priv;
669 cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
671 if (device < cm_id_priv->id.device)
672 node = node->rb_left;
673 else if (device > cm_id_priv->id.device)
674 node = node->rb_right;
675 else if (be64_lt(service_id, cm_id_priv->id.service_id))
676 node = node->rb_left;
677 else if (be64_gt(service_id, cm_id_priv->id.service_id))
678 node = node->rb_right;
680 refcount_inc(&cm_id_priv->refcount);
687 static struct cm_timewait_info *
688 cm_insert_remote_id(struct cm_timewait_info *timewait_info)
690 struct rb_node **link = &cm.remote_id_table.rb_node;
691 struct rb_node *parent = NULL;
692 struct cm_timewait_info *cur_timewait_info;
693 __be64 remote_ca_guid = timewait_info->remote_ca_guid;
694 __be32 remote_id = timewait_info->work.remote_id;
698 cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
700 if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
701 link = &(*link)->rb_left;
702 else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
703 link = &(*link)->rb_right;
704 else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
705 link = &(*link)->rb_left;
706 else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
707 link = &(*link)->rb_right;
709 return cur_timewait_info;
711 timewait_info->inserted_remote_id = 1;
712 rb_link_node(&timewait_info->remote_id_node, parent, link);
713 rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
717 static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
720 struct rb_node *node = cm.remote_id_table.rb_node;
721 struct cm_timewait_info *timewait_info;
722 struct cm_id_private *res = NULL;
724 spin_lock_irq(&cm.lock);
726 timewait_info = rb_entry(node, struct cm_timewait_info,
728 if (be32_lt(remote_id, timewait_info->work.remote_id))
729 node = node->rb_left;
730 else if (be32_gt(remote_id, timewait_info->work.remote_id))
731 node = node->rb_right;
732 else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
733 node = node->rb_left;
734 else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
735 node = node->rb_right;
737 res = cm_acquire_id(timewait_info->work.local_id,
738 timewait_info->work.remote_id);
742 spin_unlock_irq(&cm.lock);
746 static struct cm_timewait_info *
747 cm_insert_remote_qpn(struct cm_timewait_info *timewait_info)
749 struct rb_node **link = &cm.remote_qp_table.rb_node;
750 struct rb_node *parent = NULL;
751 struct cm_timewait_info *cur_timewait_info;
752 __be64 remote_ca_guid = timewait_info->remote_ca_guid;
753 __be32 remote_qpn = timewait_info->remote_qpn;
757 cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
759 if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
760 link = &(*link)->rb_left;
761 else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
762 link = &(*link)->rb_right;
763 else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
764 link = &(*link)->rb_left;
765 else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
766 link = &(*link)->rb_right;
768 return cur_timewait_info;
770 timewait_info->inserted_remote_qp = 1;
771 rb_link_node(&timewait_info->remote_qp_node, parent, link);
772 rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
776 static struct cm_id_private *
777 cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
779 struct rb_node **link = &cm.remote_sidr_table.rb_node;
780 struct rb_node *parent = NULL;
781 struct cm_id_private *cur_cm_id_priv;
782 __be32 remote_id = cm_id_priv->id.remote_id;
786 cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
788 if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
789 link = &(*link)->rb_left;
790 else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
791 link = &(*link)->rb_right;
793 if (cur_cm_id_priv->sidr_slid < cm_id_priv->sidr_slid)
794 link = &(*link)->rb_left;
795 else if (cur_cm_id_priv->sidr_slid > cm_id_priv->sidr_slid)
796 link = &(*link)->rb_right;
798 return cur_cm_id_priv;
801 rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
802 rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
806 static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
807 ib_cm_handler cm_handler,
810 struct cm_id_private *cm_id_priv;
814 cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
816 return ERR_PTR(-ENOMEM);
818 cm_id_priv->id.state = IB_CM_IDLE;
819 cm_id_priv->id.device = device;
820 cm_id_priv->id.cm_handler = cm_handler;
821 cm_id_priv->id.context = context;
822 cm_id_priv->id.remote_cm_qpn = 1;
824 RB_CLEAR_NODE(&cm_id_priv->service_node);
825 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
826 spin_lock_init(&cm_id_priv->lock);
827 init_completion(&cm_id_priv->comp);
828 INIT_LIST_HEAD(&cm_id_priv->work_list);
829 atomic_set(&cm_id_priv->work_count, -1);
830 refcount_set(&cm_id_priv->refcount, 1);
832 ret = xa_alloc_cyclic(&cm.local_id_table, &id, NULL, xa_limit_32b,
833 &cm.local_id_next, GFP_KERNEL);
836 cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
846 * Make the ID visible to the MAD handlers and other threads that use the xarray.
849 static void cm_finalize_id(struct cm_id_private *cm_id_priv)
851 xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
852 cm_id_priv, GFP_ATOMIC);
855 struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
856 ib_cm_handler cm_handler,
859 struct cm_id_private *cm_id_priv;
861 cm_id_priv = cm_alloc_id_priv(device, cm_handler, context);
862 if (IS_ERR(cm_id_priv))
863 return ERR_CAST(cm_id_priv);
865 cm_finalize_id(cm_id_priv);
866 return &cm_id_priv->id;
868 EXPORT_SYMBOL(ib_create_cm_id);
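/*
 * Illustrative usage (hypothetical caller, not part of this file):
 *
 *	struct ib_cm_id *id;
 *
 *	id = ib_create_cm_id(device, my_cm_handler, my_context);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	...
 *	ib_destroy_cm_id(id);
 */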
870 static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
872 struct cm_work *work;
874 if (list_empty(&cm_id_priv->work_list))
877 work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
878 list_del(&work->list);
882 static void cm_free_work(struct cm_work *work)
884 if (work->mad_recv_wc)
885 ib_free_recv_mad(work->mad_recv_wc);
889 static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
890 struct cm_work *work)
891 __releases(&cm_id_priv->lock)
896 * To deliver the event to the user callback we have to drop the
897 * spinlock; however, we need to ensure that the user callback is
898 * single threaded and receives events in temporal order. If there are
899 * already events being processed, thread new events onto a list and
900 * the thread currently processing will pick them up.
902 immediate = atomic_inc_and_test(&cm_id_priv->work_count);
904 list_add_tail(&work->list, &cm_id_priv->work_list);
906 * This routine always consumes the incoming reference. Once queued
907 * to the work_list, a reference is held by the thread currently
908 * running cm_process_work(), so this reference is not needed.
911 cm_deref_id(cm_id_priv);
913 spin_unlock_irq(&cm_id_priv->lock);
916 cm_process_work(cm_id_priv, work);
919 static inline int cm_convert_to_ms(int iba_time)
921 /* approximate conversion to ms from 4.096us x 2^iba_time */
922 return 1 << max(iba_time - 8, 0);
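/*
 * Worked example: 4.096 us is roughly 2^-8 ms, so 4.096us x 2^iba_time
 * ~= 2^(iba_time - 8) ms. For iba_time = 14 the exact value is
 * 4.096us x 2^14 = 67.1 ms, and 1 << (14 - 8) = 64 ms. Values of
 * iba_time <= 8 round up to 1 ms.
 */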
926 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
927 * Because of how ack_timeout is stored, adding one doubles the timeout.
928 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
929 * increment it (round up) only if the other is within 50%.
931 static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
933 int ack_timeout = packet_life_time + 1;
935 if (ack_timeout >= ca_ack_delay)
936 ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
938 ack_timeout = ca_ack_delay +
939 (ack_timeout >= (ca_ack_delay - 1));
941 return min(31, ack_timeout);
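/*
 * Worked example for cm_ack_timeout(): ca_ack_delay = 5 and
 * packet_life_time = 10 give ack_timeout = 11; since 11 >= 5 and
 * 5 < (11 - 1), nothing is added and the result is 11. With
 * ca_ack_delay = 11 instead, 11 >= (11 - 1) holds, so the result rounds
 * up to 12. The value is clamped to 31, the largest encodable 5-bit
 * timeout exponent.
 */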
944 static void cm_remove_remote(struct cm_id_private *cm_id_priv)
946 struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info;
948 if (timewait_info->inserted_remote_id) {
949 rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
950 timewait_info->inserted_remote_id = 0;
953 if (timewait_info->inserted_remote_qp) {
954 rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
955 timewait_info->inserted_remote_qp = 0;
959 static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
961 struct cm_timewait_info *timewait_info;
963 timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
965 return ERR_PTR(-ENOMEM);
967 timewait_info->work.local_id = local_id;
968 INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
969 timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
970 return timewait_info;
973 static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
977 struct cm_device *cm_dev;
979 lockdep_assert_held(&cm_id_priv->lock);
981 cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
985 spin_lock_irqsave(&cm.lock, flags);
986 cm_remove_remote(cm_id_priv);
987 list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
988 spin_unlock_irqrestore(&cm.lock, flags);
991 * The cm_id could be destroyed by the user before we exit timewait.
992 * To protect against this, we search for the cm_id after exiting
993 * timewait before notifying the user that we've exited timewait.
995 cm_id_priv->id.state = IB_CM_TIMEWAIT;
996 wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
998 /* Check if the device started its remove_one */
999 spin_lock_irqsave(&cm.lock, flags);
1000 if (!cm_dev->going_down)
1001 queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
1002 msecs_to_jiffies(wait_time));
1003 spin_unlock_irqrestore(&cm.lock, flags);
1006 * The timewait_info is converted into a work and gets freed during
1007 * cm_free_work() in cm_timewait_handler().
1009 BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0);
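/*
 * Because the work member sits at offset zero (checked above), freeing
 * the work in cm_free_work() frees the enclosing timewait_info with it.
 */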
1010 cm_id_priv->timewait_info = NULL;
1013 static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
1015 unsigned long flags;
1017 lockdep_assert_held(&cm_id_priv->lock);
1019 cm_id_priv->id.state = IB_CM_IDLE;
1020 if (cm_id_priv->timewait_info) {
1021 spin_lock_irqsave(&cm.lock, flags);
1022 cm_remove_remote(cm_id_priv);
1023 spin_unlock_irqrestore(&cm.lock, flags);
1024 kfree(cm_id_priv->timewait_info);
1025 cm_id_priv->timewait_info = NULL;
1029 static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id)
1031 struct cm_id_private *cm_id_priv;
1033 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1034 pr_err("%s: cm_id=%p timed out. state=%d refcnt=%d\n", __func__,
1035 cm_id, cm_id->state, refcount_read(&cm_id_priv->refcount));
1038 static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
1040 struct cm_id_private *cm_id_priv;
1041 struct cm_work *work;
1044 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1045 spin_lock_irq(&cm_id_priv->lock);
1047 switch (cm_id->state) {
1049 spin_lock(&cm.lock);
1050 if (--cm_id_priv->listen_sharecount > 0) {
1051 /* The id is still shared. */
1052 WARN_ON(refcount_read(&cm_id_priv->refcount) == 1);
1053 spin_unlock(&cm.lock);
1054 spin_unlock_irq(&cm_id_priv->lock);
1055 cm_deref_id(cm_id_priv);
1058 cm_id->state = IB_CM_IDLE;
1059 rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
1060 RB_CLEAR_NODE(&cm_id_priv->service_node);
1061 spin_unlock(&cm.lock);
1063 case IB_CM_SIDR_REQ_SENT:
1064 cm_id->state = IB_CM_IDLE;
1065 ib_cancel_mad(cm_id_priv->msg);
1067 case IB_CM_SIDR_REQ_RCVD:
1068 cm_send_sidr_rep_locked(cm_id_priv,
1069 &(struct ib_cm_sidr_rep_param){
1070 .status = IB_SIDR_REJECT });
1071 /* cm_send_sidr_rep_locked will not move to IDLE if it fails */
1072 cm_id->state = IB_CM_IDLE;
1074 case IB_CM_REQ_SENT:
1075 case IB_CM_MRA_REQ_RCVD:
1076 ib_cancel_mad(cm_id_priv->msg);
1077 cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
1078 &cm_id_priv->id.device->node_guid,
1079 sizeof(cm_id_priv->id.device->node_guid),
1082 case IB_CM_REQ_RCVD:
1083 if (err == -ENOMEM) {
1084 /* Do not reject to allow future retries. */
1085 cm_reset_to_idle(cm_id_priv);
1087 cm_send_rej_locked(cm_id_priv,
1088 IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
1092 case IB_CM_REP_SENT:
1093 case IB_CM_MRA_REP_RCVD:
1094 ib_cancel_mad(cm_id_priv->msg);
1095 cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
1098 case IB_CM_MRA_REQ_SENT:
1099 case IB_CM_REP_RCVD:
1100 case IB_CM_MRA_REP_SENT:
1101 cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
1104 case IB_CM_ESTABLISHED:
1105 if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
1106 cm_id->state = IB_CM_IDLE;
1109 cm_send_dreq_locked(cm_id_priv, NULL, 0);
1111 case IB_CM_DREQ_SENT:
1112 ib_cancel_mad(cm_id_priv->msg);
1113 cm_enter_timewait(cm_id_priv);
1115 case IB_CM_DREQ_RCVD:
1116 cm_send_drep_locked(cm_id_priv, NULL, 0);
1117 WARN_ON(cm_id->state != IB_CM_TIMEWAIT);
1119 case IB_CM_TIMEWAIT:
1121 * The cm_acquire_id in cm_timewait_handler will stop working
1122 * once we do xa_erase below, so just move to idle here for consistency.
1125 cm_id->state = IB_CM_IDLE;
1130 WARN_ON(cm_id->state != IB_CM_IDLE);
1132 spin_lock(&cm.lock);
1133 /* Required for cleanup paths related to cm_req_handler() */
1134 if (cm_id_priv->timewait_info) {
1135 cm_remove_remote(cm_id_priv);
1136 kfree(cm_id_priv->timewait_info);
1137 cm_id_priv->timewait_info = NULL;
1140 WARN_ON(cm_id_priv->listen_sharecount);
1141 WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
1142 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
1143 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
1144 spin_unlock(&cm.lock);
1145 spin_unlock_irq(&cm_id_priv->lock);
1147 xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
1148 cm_deref_id(cm_id_priv);
1150 ret = wait_for_completion_timeout(&cm_id_priv->comp,
1152 CM_DESTROY_ID_WAIT_TIMEOUT));
1153 if (!ret) /* timeout happened */
1154 cm_destroy_id_wait_timeout(cm_id);
1157 while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
1160 cm_destroy_av(&cm_id_priv->av);
1161 cm_destroy_av(&cm_id_priv->alt_av);
1162 kfree(cm_id_priv->private_data);
1163 kfree_rcu(cm_id_priv, rcu);
1166 void ib_destroy_cm_id(struct ib_cm_id *cm_id)
1168 cm_destroy_id(cm_id, 0);
1170 EXPORT_SYMBOL(ib_destroy_cm_id);
1172 static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id)
1174 if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
1175 (service_id != IB_CM_ASSIGN_SERVICE_ID))
1178 if (service_id == IB_CM_ASSIGN_SERVICE_ID)
1179 cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
1181 cm_id_priv->id.service_id = service_id;
1187 * ib_cm_listen - Initiates listening on the specified service ID for
1188 * connection and service ID resolution requests.
1189 * @cm_id: Connection identifier associated with the listen request.
1190 * @service_id: Service identifier matched against incoming connection
1191 * and service ID resolution requests. The service ID should be specified
1192 * in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
1193 * assign a service ID to the caller.
1195 int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id)
1197 struct cm_id_private *cm_id_priv =
1198 container_of(cm_id, struct cm_id_private, id);
1199 unsigned long flags;
1202 spin_lock_irqsave(&cm_id_priv->lock, flags);
1203 if (cm_id_priv->id.state != IB_CM_IDLE) {
1208 ret = cm_init_listen(cm_id_priv, service_id);
1212 if (!cm_insert_listen(cm_id_priv, NULL)) {
1217 cm_id_priv->id.state = IB_CM_LISTEN;
1221 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1224 EXPORT_SYMBOL(ib_cm_listen);
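/*
 * Illustrative usage (hypothetical caller): listen on a fixed service ID
 * in network-byte order, or pass IB_CM_ASSIGN_SERVICE_ID to have one
 * assigned.
 *
 *	ret = ib_cm_listen(id, cpu_to_be64(MY_SERVICE_ID));
 *	if (ret)
 *		ib_destroy_cm_id(id);
 */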
1227 * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
1228 * the given service ID.
1230 * If there's an existing ID listening on that same device and service ID,
1231 * return it.
1233 * @device: Device associated with the cm_id. All related communication will
1234 * be associated with the specified device.
1235 * @cm_handler: Callback invoked to notify the user of CM events.
1236 * @service_id: Service identifier matched against incoming connection
1237 * and service ID resolution requests. The service ID should be specified
1238 * in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
1239 * assign a service ID to the caller.
1241 * Callers should call ib_destroy_cm_id when done with the listener ID.
1243 struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
1244 ib_cm_handler cm_handler,
1247 struct cm_id_private *listen_id_priv;
1248 struct cm_id_private *cm_id_priv;
1251 /* Create an ID in advance, since the creation may sleep */
1252 cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL);
1253 if (IS_ERR(cm_id_priv))
1254 return ERR_CAST(cm_id_priv);
1256 err = cm_init_listen(cm_id_priv, service_id);
1258 ib_destroy_cm_id(&cm_id_priv->id);
1259 return ERR_PTR(err);
1262 spin_lock_irq(&cm_id_priv->lock);
1263 listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
1264 if (listen_id_priv != cm_id_priv) {
1265 spin_unlock_irq(&cm_id_priv->lock);
1266 ib_destroy_cm_id(&cm_id_priv->id);
1267 if (!listen_id_priv)
1268 return ERR_PTR(-EINVAL);
1269 return &listen_id_priv->id;
1271 cm_id_priv->id.state = IB_CM_LISTEN;
1272 spin_unlock_irq(&cm_id_priv->lock);
1275 * A listen ID does not need to be in the xarray since it does not
1276 * receive mads, is not placed in the remote_id or remote_qpn rbtree,
1277 * and does not enter timewait.
1280 return &cm_id_priv->id;
1282 EXPORT_SYMBOL(ib_cm_insert_listen);
1284 static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
1286 u64 hi_tid = 0, low_tid;
1288 lockdep_assert_held(&cm_id_priv->lock);
1290 low_tid = (u64)cm_id_priv->id.local_id;
1291 if (!cm_id_priv->av.port)
1292 return cpu_to_be64(low_tid);
1294 spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
1295 if (cm_id_priv->av.port->mad_agent)
1296 hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32;
1297 spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
1298 return cpu_to_be64(hi_tid | low_tid);
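/*
 * The TID built by cm_form_tid() carries the MAD agent's hi_tid in the
 * upper 32 bits and the randomized local_id in the lower 32 bits, so a
 * peer's reply can be matched back to both the sending agent and this
 * cm_id.
 */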
1301 static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
1302 __be16 attr_id, __be64 tid)
1304 hdr->base_version = IB_MGMT_BASE_VERSION;
1305 hdr->mgmt_class = IB_MGMT_CLASS_CM;
1306 hdr->class_version = IB_CM_CLASS_VERSION;
1307 hdr->method = IB_MGMT_METHOD_SEND;
1308 hdr->attr_id = attr_id;
1312 static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id,
1313 __be64 tid, u32 attr_mod)
1315 cm_format_mad_hdr(hdr, attr_id, tid);
1316 hdr->attr_mod = cpu_to_be32(attr_mod);
1319 static void cm_format_req(struct cm_req_msg *req_msg,
1320 struct cm_id_private *cm_id_priv,
1321 struct ib_cm_req_param *param)
1323 struct sa_path_rec *pri_path = param->primary_path;
1324 struct sa_path_rec *alt_path = param->alternate_path;
1325 bool pri_ext = false;
1328 if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
1329 pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
1330 pri_path->opa.slid);
1332 cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
1333 cm_form_tid(cm_id_priv), param->ece.attr_mod);
1335 IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
1336 be32_to_cpu(cm_id_priv->id.local_id));
1337 IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id));
1338 IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg,
1339 be64_to_cpu(cm_id_priv->id.device->node_guid));
1340 IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num);
1341 IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth);
1342 IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg,
1343 param->remote_cm_response_timeout);
1344 cm_req_set_qp_type(req_msg, param->qp_type);
1345 IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control);
1346 IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn);
1347 IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg,
1348 param->local_cm_response_timeout);
1349 IBA_SET(CM_REQ_PARTITION_KEY, req_msg,
1350 be16_to_cpu(param->primary_path->pkey));
1351 IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg,
1352 param->primary_path->mtu);
1353 IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries);
1355 if (param->qp_type != IB_QPT_XRC_INI) {
1356 IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg,
1357 param->responder_resources);
1358 IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count);
1359 IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg,
1360 param->rnr_retry_count);
1361 IBA_SET(CM_REQ_SRQ, req_msg, param->srq);
1364 *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) =
1366 *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) =
1369 IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)
1370 ->global.interface_id =
1371 OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
1372 IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)
1373 ->global.interface_id =
1374 OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
1376 if (pri_path->hop_limit <= 1) {
1377 IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
1378 be16_to_cpu(pri_ext ? 0 :
1379 htons(ntohl(sa_path_get_slid(
1381 IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
1382 be16_to_cpu(pri_ext ? 0 :
1383 htons(ntohl(sa_path_get_dlid(
1387 if (param->primary_path_inbound) {
1388 lid = param->primary_path_inbound->ib.dlid;
1389 IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
1392 IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
1393 be16_to_cpu(IB_LID_PERMISSIVE));
1395 /* Work-around until there's a way to obtain remote LID info */
1396 IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
1397 be16_to_cpu(IB_LID_PERMISSIVE));
1399 IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg,
1400 be32_to_cpu(pri_path->flow_label));
1401 IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate);
1402 IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class);
1403 IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit);
1404 IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl);
1405 IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg,
1406 (pri_path->hop_limit <= 1));
1407 IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg,
1408 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
1409 pri_path->packet_life_time));
1412 bool alt_ext = false;
1414 if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
1415 alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
1416 alt_path->opa.slid);
1418 *IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) =
1420 *IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) =
1423 IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
1425 ->global.interface_id =
1426 OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
1427 IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID,
1429 ->global.interface_id =
1430 OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
1432 if (alt_path->hop_limit <= 1) {
1433 IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
1436 htons(ntohl(sa_path_get_slid(
1438 IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
1441 htons(ntohl(sa_path_get_dlid(
1444 IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
1445 be16_to_cpu(IB_LID_PERMISSIVE));
1446 IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
1447 be16_to_cpu(IB_LID_PERMISSIVE));
1449 IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg,
1450 be32_to_cpu(alt_path->flow_label));
1451 IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate);
1452 IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg,
1453 alt_path->traffic_class);
1454 IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg,
1455 alt_path->hop_limit);
1456 IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl);
1457 IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg,
1458 (alt_path->hop_limit <= 1));
1459 IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg,
1460 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
1461 alt_path->packet_life_time));
1463 IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id);
1465 if (param->private_data && param->private_data_len)
1466 IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
1467 param->private_data_len);
1470 static int cm_validate_req_param(struct ib_cm_req_param *param)
1472 if (!param->primary_path)
1475 if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
1476 param->qp_type != IB_QPT_XRC_INI)
1479 if (param->private_data &&
1480 param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
1483 if (param->alternate_path &&
1484 (param->alternate_path->pkey != param->primary_path->pkey ||
1485 param->alternate_path->mtu != param->primary_path->mtu))
1491 int ib_send_cm_req(struct ib_cm_id *cm_id,
1492 struct ib_cm_req_param *param)
1494 struct cm_av av = {}, alt_av = {};
1495 struct cm_id_private *cm_id_priv;
1496 struct ib_mad_send_buf *msg;
1497 struct cm_req_msg *req_msg;
1498 unsigned long flags;
1501 ret = cm_validate_req_param(param);
1505 /* Verify that we're not in timewait. */
1506 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1507 spin_lock_irqsave(&cm_id_priv->lock, flags);
1508 if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
1509 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1512 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1514 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
1515 id.local_id);
1516 if (IS_ERR(cm_id_priv->timewait_info)) {
1517 ret = PTR_ERR(cm_id_priv->timewait_info);
1518 cm_id_priv->timewait_info = NULL;
1522 ret = cm_init_av_by_path(param->primary_path,
1523 param->ppath_sgid_attr, &av);
1526 if (param->alternate_path) {
1527 ret = cm_init_av_by_path(param->alternate_path, NULL,
1534 cm_id->service_id = param->service_id;
1535 cm_id_priv->timeout_ms = cm_convert_to_ms(
1536 param->primary_path->packet_life_time) * 2 +
1538 param->remote_cm_response_timeout);
1539 cm_id_priv->max_cm_retries = param->max_cm_retries;
1540 cm_id_priv->initiator_depth = param->initiator_depth;
1541 cm_id_priv->responder_resources = param->responder_resources;
1542 cm_id_priv->retry_count = param->retry_count;
1543 cm_id_priv->path_mtu = param->primary_path->mtu;
1544 cm_id_priv->pkey = param->primary_path->pkey;
1545 cm_id_priv->qp_type = param->qp_type;
1547 spin_lock_irqsave(&cm_id_priv->lock, flags);
1549 cm_move_av_from_path(&cm_id_priv->av, &av);
1550 if (param->primary_path_outbound)
1551 cm_id_priv->av.dlid_datapath =
1552 be16_to_cpu(param->primary_path_outbound->ib.dlid);
1554 if (param->alternate_path)
1555 cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);
1557 msg = cm_alloc_priv_msg(cm_id_priv);
1563 req_msg = (struct cm_req_msg *)msg->mad;
1564 cm_format_req(req_msg, cm_id_priv, param);
1565 cm_id_priv->tid = req_msg->hdr.tid;
1566 msg->timeout_ms = cm_id_priv->timeout_ms;
1567 msg->context[1] = (void *)(unsigned long)IB_CM_REQ_SENT;
1569 cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
1570 cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
1572 trace_icm_send_req(&cm_id_priv->id);
1573 ret = ib_post_send_mad(msg, NULL);
1576 BUG_ON(cm_id->state != IB_CM_IDLE);
1577 cm_id->state = IB_CM_REQ_SENT;
1578 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1581 cm_free_priv_msg(msg);
1583 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1586 EXPORT_SYMBOL(ib_send_cm_req);
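/*
 * Illustrative usage (hypothetical caller): the fields below are the ones
 * cm_validate_req_param() and cm_format_req() above require or consume;
 * qp_type must be RC, UC or XRC_INI, and names such as path_rec and
 * MY_SERVICE_ID are placeholders.
 *
 *	struct ib_cm_req_param param = {
 *		.primary_path		= &path_rec,
 *		.service_id		= cpu_to_be64(MY_SERVICE_ID),
 *		.qp_num			= qp->qp_num,
 *		.qp_type		= qp->qp_type,
 *		.starting_psn		= my_psn,
 *		.responder_resources	= 4,
 *		.initiator_depth	= 4,
 *		.retry_count		= 7,
 *		.rnr_retry_count	= 7,
 *		.max_cm_retries		= 15,
 *	};
 *
 *	ret = ib_send_cm_req(id, &param);
 */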
1588 static int cm_issue_rej(struct cm_port *port,
1589 struct ib_mad_recv_wc *mad_recv_wc,
1590 enum ib_cm_rej_reason reason,
1591 enum cm_msg_response msg_rejected,
1592 void *ari, u8 ari_length)
1594 struct ib_mad_send_buf *msg = NULL;
1595 struct cm_rej_msg *rej_msg, *rcv_msg;
1598 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
1602 /* We just need common CM header information. Cast to any message. */
1603 rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
1604 rej_msg = (struct cm_rej_msg *) msg->mad;
1606 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
1607 IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
1608 IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg));
1609 IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1610 IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
1611 IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected);
1612 IBA_SET(CM_REJ_REASON, rej_msg, reason);
1614 if (ari && ari_length) {
1615 IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
1616 IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
1619 trace_icm_issue_rej(
1620 IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg),
1621 IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
1622 ret = ib_post_send_mad(msg, NULL);
1624 cm_free_response_msg(msg);
1629 static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
1631 return ((cpu_to_be16(
1632 IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) ||
1633 (ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
1637 static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num,
1638 struct sa_path_rec *path, union ib_gid *gid)
1640 if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
1641 path->rec_type = SA_PATH_REC_TYPE_OPA;
1643 path->rec_type = SA_PATH_REC_TYPE_IB;
1646 static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
1647 struct sa_path_rec *primary_path,
1648 struct sa_path_rec *alt_path,
1653 if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
1654 sa_path_set_dlid(primary_path, wc->slid);
1655 sa_path_set_slid(primary_path,
1656 IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
1659 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1660 CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg));
1661 sa_path_set_dlid(primary_path, lid);
1663 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1664 CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg));
1665 sa_path_set_slid(primary_path, lid);
1668 if (!cm_req_has_alt_path(req_msg))
1671 if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
1672 sa_path_set_dlid(alt_path,
1673 IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
1675 sa_path_set_slid(alt_path,
1676 IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
1679 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1680 CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg));
1681 sa_path_set_dlid(alt_path, lid);
1683 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1684 CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg));
1685 sa_path_set_slid(alt_path, lid);
1689 static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
1690 struct sa_path_rec *primary_path,
1691 struct sa_path_rec *alt_path,
1694 primary_path->dgid =
1695 *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
1696 primary_path->sgid =
1697 *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg);
1698 primary_path->flow_label =
1699 cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg));
1700 primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg);
1701 primary_path->traffic_class =
1702 IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg);
1703 primary_path->reversible = 1;
1704 primary_path->pkey =
1705 cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
1706 primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg);
1707 primary_path->mtu_selector = IB_SA_EQ;
1708 primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
1709 primary_path->rate_selector = IB_SA_EQ;
1710 primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg);
1711 primary_path->packet_life_time_selector = IB_SA_EQ;
1712 primary_path->packet_life_time =
1713 IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg);
1714 primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
1715 primary_path->service_id =
1716 cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
1717 if (sa_path_is_roce(primary_path))
1718 primary_path->roce.route_resolved = false;
1720 if (cm_req_has_alt_path(req_msg)) {
1721 alt_path->dgid = *IBA_GET_MEM_PTR(
1722 CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg);
1723 alt_path->sgid = *IBA_GET_MEM_PTR(
1724 CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg);
1725 alt_path->flow_label = cpu_to_be32(
1726 IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg));
1727 alt_path->hop_limit =
1728 IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg);
1729 alt_path->traffic_class =
1730 IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg);
1731 alt_path->reversible = 1;
1733 cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
1734 alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg);
1735 alt_path->mtu_selector = IB_SA_EQ;
1737 IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
1738 alt_path->rate_selector = IB_SA_EQ;
1739 alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg);
1740 alt_path->packet_life_time_selector = IB_SA_EQ;
1741 alt_path->packet_life_time =
1742 IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg);
1743 alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
1744 alt_path->service_id =
1745 cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
1747 if (sa_path_is_roce(alt_path))
1748 alt_path->roce.route_resolved = false;
1750 cm_format_path_lid_from_req(req_msg, primary_path, alt_path, wc);
1753 static u16 cm_get_bth_pkey(struct cm_work *work)
1755 struct ib_device *ib_dev = work->port->cm_dev->ib_device;
1756 u32 port_num = work->port->port_num;
1757 u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
1761 ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
1763 dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %u, pkey index %u). %d\n",
1764 port_num, pkey_index, ret);
1772 * cm_opa_to_ib_sgid - Convert OPA SGID to IB SGID
1773 * ULPs (such as IPoIB) do not understand OPA GIDs and will
1774 * reject them as the local_gid will not match the sgid. Therefore,
1775 * change the pathrec's SGID to an IB SGID.
1777 * @work: Work completion
1778 * @path: Path record
1780 static void cm_opa_to_ib_sgid(struct cm_work *work,
1781 struct sa_path_rec *path)
1783 struct ib_device *dev = work->port->cm_dev->ib_device;
1784 u32 port_num = work->port->port_num;
1786 if (rdma_cap_opa_ah(dev, port_num) &&
1787 (ib_is_opa_gid(&path->sgid))) {
1790 if (rdma_query_gid(dev, port_num, 0, &sgid)) {
1792 "Error updating sgid in CM request\n");
1800 static void cm_format_req_event(struct cm_work *work,
1801 struct cm_id_private *cm_id_priv,
1802 struct ib_cm_id *listen_id)
1804 struct cm_req_msg *req_msg;
1805 struct ib_cm_req_event_param *param;
1807 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1808 param = &work->cm_event.param.req_rcvd;
1809 param->listen_id = listen_id;
1810 param->bth_pkey = cm_get_bth_pkey(work);
1811 param->port = cm_id_priv->av.port->port_num;
1812 param->primary_path = &work->path[0];
1813 cm_opa_to_ib_sgid(work, param->primary_path);
1814 if (cm_req_has_alt_path(req_msg)) {
1815 param->alternate_path = &work->path[1];
1816 cm_opa_to_ib_sgid(work, param->alternate_path);
1818 param->alternate_path = NULL;
1820 param->remote_ca_guid =
1821 cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
1822 param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg);
1823 param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg);
1824 param->qp_type = cm_req_get_qp_type(req_msg);
1825 param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg);
1826 param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
1827 param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
1828 param->local_cm_response_timeout =
1829 IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg);
1830 param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg);
1831 param->remote_cm_response_timeout =
1832 IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg);
1833 param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
1834 param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
1835 param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
1836 param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
1837 param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg);
1838 param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod);
1840 work->cm_event.private_data =
1841 IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
1844 static void cm_process_work(struct cm_id_private *cm_id_priv,
1845 struct cm_work *work)
1849 /* We will typically only have the current event to report. */
1850 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
1853 while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
1854 spin_lock_irq(&cm_id_priv->lock);
1855 work = cm_dequeue_work(cm_id_priv);
1856 spin_unlock_irq(&cm_id_priv->lock);
1860 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
1864 cm_deref_id(cm_id_priv);
1866 cm_destroy_id(&cm_id_priv->id, ret);
1869 static void cm_format_mra(struct cm_mra_msg *mra_msg,
1870 struct cm_id_private *cm_id_priv,
1871 enum cm_msg_response msg_mraed, u8 service_timeout,
1872 const void *private_data, u8 private_data_len)
1874 cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
1875 IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed);
1876 IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg,
1877 be32_to_cpu(cm_id_priv->id.local_id));
1878 IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
1879 be32_to_cpu(cm_id_priv->id.remote_id));
1880 IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);
1882 if (private_data && private_data_len)
1883 IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
1887 static void cm_format_rej(struct cm_rej_msg *rej_msg,
1888 struct cm_id_private *cm_id_priv,
1889 enum ib_cm_rej_reason reason, void *ari,
1890 u8 ari_length, const void *private_data,
1891 u8 private_data_len, enum ib_cm_state state)
1893 lockdep_assert_held(&cm_id_priv->lock);
1895 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
1896 IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
1897 be32_to_cpu(cm_id_priv->id.remote_id));
1900 case IB_CM_REQ_RCVD:
1901 IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
1902 IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
1904 case IB_CM_MRA_REQ_SENT:
1905 IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1906 be32_to_cpu(cm_id_priv->id.local_id));
1907 IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
1909 case IB_CM_REP_RCVD:
1910 case IB_CM_MRA_REP_SENT:
1911 IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1912 be32_to_cpu(cm_id_priv->id.local_id));
1913 IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP);
1916 IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1917 be32_to_cpu(cm_id_priv->id.local_id));
1918 IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg,
1919 CM_MSG_RESPONSE_OTHER);
1923 IBA_SET(CM_REJ_REASON, rej_msg, reason);
1924 if (ari && ari_length) {
1925 IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
1926 IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
1929 if (private_data && private_data_len)
1930 IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data,
1934 static void cm_dup_req_handler(struct cm_work *work,
1935 struct cm_id_private *cm_id_priv)
1937 struct ib_mad_send_buf *msg = NULL;
1941 &work->port->counters[CM_RECV_DUPLICATES][CM_REQ_COUNTER]);
1943 /* Quick state check to discard duplicate REQs. */
1944 spin_lock_irq(&cm_id_priv->lock);
1945 if (cm_id_priv->id.state == IB_CM_REQ_RCVD) {
1946 spin_unlock_irq(&cm_id_priv->lock);
1949 spin_unlock_irq(&cm_id_priv->lock);
1951 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1955 spin_lock_irq(&cm_id_priv->lock);
1956 switch (cm_id_priv->id.state) {
1957 case IB_CM_MRA_REQ_SENT:
1958 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1959 CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
1960 cm_id_priv->private_data,
1961 cm_id_priv->private_data_len);
1963 case IB_CM_TIMEWAIT:
1964 cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
1965 IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
1971 spin_unlock_irq(&cm_id_priv->lock);
1973 trace_icm_send_dup_req(&cm_id_priv->id);
1974 ret = ib_post_send_mad(msg, NULL);
1979 unlock: spin_unlock_irq(&cm_id_priv->lock);
1980 free: cm_free_response_msg(msg);
1983 static struct cm_id_private *cm_match_req(struct cm_work *work,
1984 struct cm_id_private *cm_id_priv)
1986 struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
1987 struct cm_timewait_info *timewait_info;
1988 struct cm_req_msg *req_msg;
1990 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1992 /* Check for possible duplicate REQ. */
1993 spin_lock_irq(&cm.lock);
1994 timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
1995 if (timewait_info) {
1996 cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
1997 timewait_info->work.remote_id);
1998 spin_unlock_irq(&cm.lock);
1999 if (cur_cm_id_priv) {
2000 cm_dup_req_handler(work, cur_cm_id_priv);
2001 cm_deref_id(cur_cm_id_priv);
2006 /* Check for stale connections. */
2007 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2008 if (timewait_info) {
2009 cm_remove_remote(cm_id_priv);
2010 cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2011 timewait_info->work.remote_id);
2013 spin_unlock_irq(&cm.lock);
2014 cm_issue_rej(work->port, work->mad_recv_wc,
2015 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
2017 if (cur_cm_id_priv) {
2018 ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
2019 cm_deref_id(cur_cm_id_priv);
2024 /* Find matching listen request. */
2025 listen_cm_id_priv = cm_find_listen(
2026 cm_id_priv->id.device,
2027 cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
2028 if (!listen_cm_id_priv) {
2029 cm_remove_remote(cm_id_priv);
2030 spin_unlock_irq(&cm.lock);
2031 cm_issue_rej(work->port, work->mad_recv_wc,
2032 IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
2036 spin_unlock_irq(&cm.lock);
2037 return listen_cm_id_priv;
2041 * Work-around for inter-subnet connections. If the LIDs are permissive,
2042 * we need to override the LID/SL data in the REQ with the LID information
2043 * in the work completion.
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
	if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) {
		if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
					req_msg)) == IB_LID_PERMISSIVE) {
			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(ib_lid_be16(wc->slid)));
			IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl);
		}

		if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
					req_msg)) == IB_LID_PERMISSIVE)
			IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
				wc->dlid_path_bits);
	}

	if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) {
		if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
					req_msg)) == IB_LID_PERMISSIVE) {
			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(ib_lid_be16(wc->slid)));
			IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl);
		}

		if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
					req_msg)) == IB_LID_PERMISSIVE)
			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
				wc->dlid_path_bits);
	}
}
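
/*
 * Handle a newly received REQ: allocate a cm_id, pull the connection
 * parameters out of the MAD, match the REQ against a listener, and
 * queue the event for delivery to the listener's callback.
 */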
static int cm_req_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	const struct ib_global_route *grh;
	const struct ib_gid_attr *gid_attr;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id_priv =
		cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id_priv))
		return PTR_ERR(cm_id_priv);
2091 cm_id_priv->id.remote_id =
2092 cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
2093 cm_id_priv->id.service_id =
2094 cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
2095 cm_id_priv->tid = req_msg->hdr.tid;
2096 cm_id_priv->timeout_ms = cm_convert_to_ms(
2097 IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
2098 cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
2099 cm_id_priv->remote_qpn =
2100 cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
2101 cm_id_priv->initiator_depth =
2102 IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
2103 cm_id_priv->responder_resources =
2104 IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
2105 cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
2106 cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
2107 cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
2108 cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
2109 cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
2110 cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				      work->mad_recv_wc->recv_buf.grh,
				      &cm_id_priv->av);
	if (ret)
		goto destroy;
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
								id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
		goto destroy;
	}
2124 cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
2125 cm_id_priv->timewait_info->remote_ca_guid =
2126 cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
2127 cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn;
	/*
	 * Note that the ID pointer is not in the xarray at this point,
	 * so this set is only visible to the local thread.
	 */
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		trace_icm_no_listener_err(&cm_id_priv->id);
		cm_id_priv->id.state = IB_CM_IDLE;
		ret = -EINVAL;
		goto destroy;
	}

	memset(&work->path[0], 0, sizeof(work->path[0]));
	if (cm_req_has_alt_path(req_msg))
		memset(&work->path[1], 0, sizeof(work->path[1]));
2146 grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
2147 gid_attr = grh->sgid_attr;
	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) {
		work->path[0].rec_type =
			sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
	} else {
		cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
		cm_path_set_rec_type(
			work->port->cm_dev->ib_device, work->port->port_num,
			&work->path[0],
			IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID,
					req_msg));
	}
	if (cm_req_has_alt_path(req_msg))
		work->path[1].rec_type = work->path[0].rec_type;
	cm_format_paths_from_req(req_msg, &work->path[0],
				 &work->path[1], work->mad_recv_wc->wc);
	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
		sa_path_set_dmac(&work->path[0],
				 cm_id_priv->av.ah_attr.roce.dmac);
	work->path[0].hop_limit = grh->hop_limit;
	/* This destroy call is needed to pair with cm_init_av_for_response */
	cm_destroy_av(&cm_id_priv->av);
	ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av);
	if (ret) {
		int err;

		err = rdma_query_gid(work->port->cm_dev->ib_device,
				     work->port->port_num, 0,
				     &work->path[0].sgid);
		if (err)
			ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
				       NULL, 0, NULL, 0);
		else
			ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
				       &work->path[0].sgid,
				       sizeof(work->path[0].sgid),
				       NULL, 0);
		goto rejected;
	}
	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_IB)
		cm_id_priv->av.dlid_datapath =
			IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg);

	if (cm_req_has_alt_path(req_msg)) {
		ret = cm_init_av_by_path(&work->path[1], NULL,
					 &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(&cm_id_priv->id,
				       IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof(work->path[0].sgid), NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);

	/* Now MAD handlers can see the new ID */
	spin_lock_irq(&cm_id_priv->lock);
	cm_finalize_id(cm_id_priv);

	/* Refcount belongs to the event, pairs with cm_process_work() */
	refcount_inc(&cm_id_priv->refcount);
	cm_queue_work_unlock(cm_id_priv, work);
	/*
	 * Since this ID was just created and was not made visible to other MAD
	 * handlers until the cm_finalize_id() above we know that the
	 * cm_process_work() will deliver the event and the listen_cm_id
	 * embedded in the event can be derefed here.
	 */
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(&cm_id_priv->id);
	return ret;
}
static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid,
2236 param->ece.attr_mod);
2237 IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
2238 be32_to_cpu(cm_id_priv->id.local_id));
2239 IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
2240 be32_to_cpu(cm_id_priv->id.remote_id));
2241 IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn);
2242 IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg,
2243 param->responder_resources);
2244 IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg,
2245 cm_id_priv->av.port->cm_dev->ack_delay);
2246 IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted);
2247 IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count);
2248 IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg,
2249 be64_to_cpu(cm_id_priv->id.device->node_guid));
2251 if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
2252 IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg,
2253 param->initiator_depth);
2254 IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg,
2255 param->flow_control);
2256 IBA_SET(CM_REP_SRQ, rep_msg, param->srq);
		IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num);
	} else {
		IBA_SET(CM_REP_SRQ, rep_msg, 1);
		IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
	}
2263 IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id);
2264 IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8);
2265 IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16);
	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
			    param->private_data_len);
}
int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		trace_icm_send_rep_err(cm_id_priv->id.local_id, cm_id->state);
		ret = -EINVAL;
		goto out;
	}

	msg = cm_alloc_priv_msg(cm_id_priv);
	if (IS_ERR(msg)) {
		ret = PTR_ERR(msg);
		goto out;
	}

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	trace_icm_send_rep(cm_id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto out_free;

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
	WARN_ONCE(param->qp_num & 0xFF000000,
		  "IBTA declares QPN to be 24 bits, but it is 0x%X\n",
		  param->qp_num);
	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

out_free:
	cm_free_priv_msg(msg);
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
2327 EXPORT_SYMBOL(ib_send_cm_rep);
static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
2335 IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg,
2336 be32_to_cpu(cm_id_priv->id.local_id));
2337 IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg,
2338 be32_to_cpu(cm_id_priv->id.remote_id));
	if (private_data && private_data_len)
		IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data,
			    private_data_len);
}
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		trace_icm_send_cm_rtu_err(cm_id);
		ret = -EINVAL;
		goto error;
	}

	msg = cm_alloc_msg(cm_id_priv);
	if (IS_ERR(msg)) {
		ret = PTR_ERR(msg);
		goto error;
	}

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	trace_icm_send_rtu(cm_id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
2398 EXPORT_SYMBOL(ib_send_cm_rtu);
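
/*
 * Convert a received REP into the event parameters reported to the
 * consumer.  Note that initiator depth and responder resources are
 * swapped relative to the wire fields: the peer's initiator depth
 * bounds our responder resources and vice versa.
 */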
static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
{
	struct cm_rep_msg *rep_msg;
2403 struct ib_cm_rep_event_param *param;
2405 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2406 param = &work->cm_event.param.rep_rcvd;
2407 param->remote_ca_guid =
2408 cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
2409 param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg);
2410 param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
2411 param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg);
2412 param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
2413 param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
2414 param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
2415 param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg);
2416 param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
2417 param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
2418 param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
2419 param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16;
2420 param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8;
2421 param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg);
2422 param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod);
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
}
static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)),
		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)));
	if (!cm_id_priv)
		return;

	atomic_long_inc(
		&work->port->counters[CM_RECV_DUPLICATES][CM_REP_COUNTER]);
	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irq(&cm_id_priv->lock);

	trace_icm_send_dup_rep(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_response_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}
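
/*
 * Handle a REP to our outstanding REQ.  The remote comm ID and QPN are
 * recorded in the timewait_info and inserted into the same rbtrees used
 * for REQs, so duplicate and stale REPs are caught by the same insert
 * conflicts.
 */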
static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	int ret;
	struct cm_id_private *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		trace_icm_remote_no_priv_err(
			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
		return -EINVAL;
	}

	cm_format_rep_event(work, cm_id_priv->qp_type);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		ret = -EINVAL;
		trace_icm_rep_unknown_err(
			IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg),
			cm_id_priv->id.state);
		spin_unlock_irq(&cm_id_priv->lock);
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id =
		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
	cm_id_priv->timewait_info->remote_ca_guid =
		cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);

	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		trace_icm_insert_failed_err(
			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
		goto error;
	}
	/* Check for a stale connection. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_remove_remote(cm_id_priv);
		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
					       timewait_info->work.remote_id);

		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		trace_icm_staleconn_err(
			IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));

		if (cur_cm_id_priv) {
			ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
			cm_deref_id(cur_cm_id_priv);
		}

		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id =
		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
	cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
	cm_id_priv->initiator_depth =
		IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
	cm_id_priv->responder_resources =
		IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
	cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
	cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
	cm_id_priv->target_ack_delay =
		IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
	cm_id_priv->av.timeout =
		cm_ack_timeout(cm_id_priv->target_ack_delay,
			       cm_id_priv->av.timeout - 1);
	cm_id_priv->alt_av.timeout =
		cm_ack_timeout(cm_id_priv->target_ack_delay,
			       cm_id_priv->alt_av.timeout - 1);

	ib_cancel_mad(cm_id_priv->msg);
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}
static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;

	/* See comment in cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	ib_cancel_mad(cm_id_priv->msg);
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)),
		cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg)));
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						     [CM_RTU_COUNTER]);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->msg);
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv));
2641 IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg,
2642 be32_to_cpu(cm_id_priv->id.local_id));
2643 IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg,
2644 be32_to_cpu(cm_id_priv->id.remote_id));
2645 IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg,
2646 be32_to_cpu(cm_id_priv->remote_qpn));
	if (private_data && private_data_len)
		IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data,
			    private_data_len);
}
static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
			       const void *private_data, u8 private_data_len)
{
	struct ib_mad_send_buf *msg;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		trace_icm_dreq_skipped(&cm_id_priv->id);
		return -EINVAL;
	}

	if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
	    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
		ib_cancel_mad(cm_id_priv->msg);

	msg = cm_alloc_priv_msg(cm_id_priv);
	if (IS_ERR(msg)) {
		cm_enter_timewait(cm_id_priv);
		return PTR_ERR(msg);
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	trace_icm_send_dreq(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		cm_free_priv_msg(msg);
		return ret;
	}

	cm_id_priv->id.state = IB_CM_DREQ_SENT;
	return 0;
}
int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
2709 EXPORT_SYMBOL(ib_send_cm_dreq);
static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2717 IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
2718 be32_to_cpu(cm_id_priv->id.local_id));
2719 IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
2720 be32_to_cpu(cm_id_priv->id.remote_id));
	if (private_data && private_data_len)
		IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data,
			    private_data_len);
}
static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
			       void *private_data, u8 private_data_len)
{
	struct ib_mad_send_buf *msg;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		trace_icm_send_drep_err(&cm_id_priv->id);
		kfree(private_data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, private_data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	msg = cm_alloc_msg(cm_id_priv);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	trace_icm_send_drep(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);
		return ret;
	}
	return 0;
}
int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	void *data;
	int ret;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_drep_locked(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
2781 EXPORT_SYMBOL(ib_send_cm_drep);
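
/*
 * Answer a DREQ that no longer matches any cm_id with a bare DREP, so
 * the remote side can complete its teardown instead of retrying.
 */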
static int cm_issue_drep(struct cm_port *port,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_dreq_msg *dreq_msg;
	struct cm_drep_msg *drep_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
	drep_msg = (struct cm_drep_msg *) msg->mad;

	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
	IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
		IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg));
	IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
		IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));

	trace_icm_issue_drep(
		IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
		IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_response_msg(msg);

	return ret;
}
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)),
		cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)));
	if (!cm_id_priv) {
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						     [CM_DREQ_COUNTER]);
		cm_issue_drep(work->port, work->mad_recv_wc);
		trace_icm_no_priv_err(
			IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
			IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
		return -EINVAL;
	}

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->local_qpn !=
	    cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg)))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
		    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
			ib_cancel_mad(cm_id_priv->msg);
		break;
	case IB_CM_TIMEWAIT:
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						     [CM_DREQ_COUNTER]);
		msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
		if (IS_ERR(msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
		    ib_post_send_mad(msg, NULL))
			cm_free_response_msg(msg);
		goto deref;
	case IB_CM_DREQ_RCVD:
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						     [CM_DREQ_COUNTER]);
		goto unlock;
	default:
		trace_icm_dreq_unknown_err(&cm_id_priv->id);
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)),
		cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg)));
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->msg);
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
			      enum ib_cm_rej_reason reason, void *ari,
			      u8 ari_length, const void *private_data,
			      u8 private_data_len)
{
	enum ib_cm_state state = cm_id_priv->id.state;
	struct ib_mad_send_buf *msg;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	trace_icm_send_rej(&cm_id_priv->id, reason);

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_reset_to_idle(cm_id_priv);
		msg = cm_alloc_msg(cm_id_priv);
		if (IS_ERR(msg))
			return PTR_ERR(msg);
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
			      ari, ari_length, private_data, private_data_len,
			      state);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_enter_timewait(cm_id_priv);
		msg = cm_alloc_msg(cm_id_priv);
		if (IS_ERR(msg))
			return PTR_ERR(msg);
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
			      ari, ari_length, private_data, private_data_len,
			      state);
		break;
	default:
		trace_icm_send_unknown_rej_err(&cm_id_priv->id);
		return -EINVAL;
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);
		return ret;
	}
	return 0;
}
int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason,
		   void *ari, u8 ari_length, const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length,
				 private_data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
2989 EXPORT_SYMBOL(ib_send_cm_rej);
static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg);
	param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg);
	param->reason = IBA_GET(CM_REJ_REASON, rej_msg);
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
}
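
/*
 * Look up the cm_id targeted by a REJ.  A timeout REJ is matched through
 * the remote ID table using the CA GUID carried in the ARI; a REJ of our
 * REQ is matched by local comm ID alone since no remote ID is known yet.
 */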
static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_id_private *cm_id_priv;
	__be32 remote_id;

	remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));

	if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
		cm_id_priv = cm_find_remote_id(
			*((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
			remote_id);
	} else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
		   CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
			0);
	else
		cm_id_priv = cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
			remote_id);

	return cm_id_priv;
}
static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->msg);
		fallthrough;
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->msg);
		fallthrough;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_enter_timewait(cm_id_priv);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
		    cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
			if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
				ib_cancel_mad(cm_id_priv->msg);
			cm_enter_timewait(cm_id_priv);
			break;
		}
		fallthrough;
	default:
		trace_icm_rej_unknown_err(&cm_id_priv->id);
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}

	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
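
/*
 * Send (or arm) an MRA to stretch the peer's timeout.  When
 * IB_CM_MRA_FLAG_DELAY is set in service_timeout no MAD is sent here;
 * the state still moves to MRA-sent, so the MRA goes on the wire only
 * if a duplicate of the received message shows up.
 */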
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	enum ib_cm_state cm_state;
	enum ib_cm_lap_state lap_state;
	enum cm_msg_response msg_response;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		cm_state = IB_CM_MRA_REQ_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REQ;
		break;
	case IB_CM_REP_RCVD:
		cm_state = IB_CM_MRA_REP_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REP;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id->lap_state == IB_CM_LAP_RCVD) {
			cm_state = cm_id->state;
			lap_state = IB_CM_MRA_LAP_SENT;
			msg_response = CM_MSG_RESPONSE_OTHER;
			break;
		}
		fallthrough;
	default:
		trace_icm_send_mra_unknown_err(&cm_id_priv->id);
		ret = -EINVAL;
		goto error_unlock;
	}

	if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
		msg = cm_alloc_msg(cm_id_priv);
		if (IS_ERR(msg)) {
			ret = PTR_ERR(msg);
			goto error_unlock;
		}

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      msg_response, service_timeout,
			      private_data, private_data_len);
		trace_icm_send_mra(cm_id);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error_free_msg;
	}

	cm_id->state = cm_state;
	cm_id->lap_state = lap_state;
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error_free_msg:
	cm_free_msg(msg);
error_unlock:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
3164 EXPORT_SYMBOL(ib_send_cm_mra);
static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
			0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
			cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg)));
	default:
		return NULL;
	}
}
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	int timeout;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg);
	work->cm_event.param.mra_rcvd.service_timeout =
		IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg);
	timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.timeout);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
			    CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
			    CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
			    CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->msg, timeout)) {
			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
				atomic_long_inc(
					&work->port->counters[CM_RECV_DUPLICATES]
							     [CM_MRA_COUNTER]);
			goto out;
		}
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_MRA_REP_RCVD:
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						     [CM_MRA_COUNTER]);
		fallthrough;
	default:
		trace_icm_mra_unknown_err(&cm_id_priv->id);
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
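
/*
 * LAP (load alternate path) handling: rebuild a sa_path_rec describing
 * the proposed alternate path from the LAP MAD.  OPA extended LIDs
 * travel in the GID fields, hence the separate LID extraction helper.
 */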
static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
					struct sa_path_rec *path)
{
	u32 lid;

	if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID,
					       lap_msg));
		sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID,
					       lap_msg));
	} else {
		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg));
		sa_path_set_dlid(path, lid);

		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg));
		sa_path_set_slid(path, lid);
	}
}
static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
				    struct sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg);
	path->sgid =
		*IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg);
	path->flow_label =
		cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg));
3280 path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg);
3281 path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg);
3282 path->reversible = 1;
3283 path->pkey = cm_id_priv->pkey;
3284 path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg);
3285 path->mtu_selector = IB_SA_EQ;
3286 path->mtu = cm_id_priv->path_mtu;
3287 path->rate_selector = IB_SA_EQ;
3288 path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg);
3289 path->packet_life_time_selector = IB_SA_EQ;
3290 path->packet_life_time =
3291 IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg);
3292 path->packet_life_time -= (path->packet_life_time > 0);
	cm_format_path_lid_from_lap(lap_msg, path);
}
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	struct rdma_ah_attr ah_attr;
	struct cm_av alt_av = {};
	int ret;

	/* Currently Alternate path messages are not supported for
	 * RoCE link layer.
	 */
	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
			       work->port->port_num))
		return -EINVAL;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)),
		cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg)));
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	memset(&work->path[0], 0, sizeof(work->path[1]));
	cm_path_set_rec_type(work->port->cm_dev->ib_device,
			     work->port->port_num, &work->path[0],
			     IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID,
					     lap_msg));
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);

	ret = ib_init_ah_attr_from_wc(work->port->cm_dev->ib_device,
				      work->port->port_num,
				      work->mad_recv_wc->wc,
				      work->mad_recv_wc->recv_buf.grh,
				      &ah_attr);
	if (ret)
		goto deref;

	ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av);
	if (ret) {
		rdma_destroy_ah_attr(&ah_attr);
		goto deref;
	}

	spin_lock_irq(&cm_id_priv->lock);
	cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
			   &ah_attr, &cm_id_priv->av);
	cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);

	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_UNINIT:
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						     [CM_LAP_COUNTER]);
		msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
		if (IS_ERR(msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
		    ib_post_send_mad(msg, NULL))
			cm_free_response_msg(msg);
		goto deref;
	case IB_CM_LAP_RCVD:
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						     [CM_LAP_COUNTER]);
		goto unlock;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;

	/* Currently Alternate path messages are not supported for
	 * RoCE link layer.
	 */
	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
			       work->port->port_num))
		return -EINVAL;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)),
		cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg)));
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status =
		IBA_GET(CM_APR_AR_STATUS, apr_msg);
	work->cm_event.param.apr_rcvd.apr_info =
		IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg);
	work->cm_event.param.apr_rcvd.info_len =
		IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg);
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->msg);
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;

	timewait_info = container_of(work, struct cm_timewait_info, work);
	spin_lock_irq(&cm.lock);
	list_del(&timewait_info->list);
	spin_unlock_irq(&cm.lock);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
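
/*
 * SIDR (service ID resolution) exchange: a SIDR REQ asks the owner of a
 * service ID for the QPN and Q_Key of the service.  No connection state
 * is created and, as noted below, no timewait is entered.
 */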
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv));
3473 IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg,
3474 be32_to_cpu(cm_id_priv->id.local_id));
3475 IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg,
3476 be16_to_cpu(param->path->pkey));
3477 IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg,
3478 be64_to_cpu(param->service_id));
	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg,
			    param->private_data, param->private_data_len);
}
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_av av = {};
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, param->sgid_attr, &av);
	if (ret)
		return ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	cm_move_av_from_path(&cm_id_priv->av, &av);
	cm_id->service_id = param->service_id;
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	if (cm_id->state != IB_CM_IDLE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	msg = cm_alloc_priv_msg(cm_id_priv);
	if (IS_ERR(msg)) {
		ret = PTR_ERR(msg);
		goto out_unlock;
	}

	cm_format_sidr_req((struct cm_sidr_req_msg *)msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *)(unsigned long)IB_CM_SIDR_REQ_SENT;

	trace_icm_send_sidr_req(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto out_free;
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;
out_free:
	cm_free_priv_msg(msg);
out_unlock:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
3537 EXPORT_SYMBOL(ib_send_cm_sidr_req);
static void cm_format_sidr_req_event(struct cm_work *work,
				     const struct cm_id_private *rx_cm_id,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
3544 struct ib_cm_sidr_req_event_param *param;
3546 sidr_req_msg = (struct cm_sidr_req_msg *)
3547 work->mad_recv_wc->recv_buf.mad;
3548 param = &work->cm_event.param.sidr_req_rcvd;
3549 param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg);
	param->listen_id = listen_id;
	param->service_id =
		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
3553 param->bth_pkey = cm_get_bth_pkey(work);
3554 param->port = work->port->port_num;
3555 param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg);
}
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	int ret;

	cm_id_priv =
		cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id_priv))
		return PTR_ERR(cm_id_priv);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;

	cm_id_priv->id.remote_id =
		cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
	cm_id_priv->id.service_id =
		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
	cm_id_priv->tid = sidr_req_msg->hdr.tid;

	wc = work->mad_recv_wc->wc;
	cm_id_priv->sidr_slid = wc->slid;
	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				      work->mad_recv_wc->recv_buf.grh,
				      &cm_id_priv->av);
	if (ret)
		goto out;

	spin_lock_irq(&cm.lock);
	listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (listen_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						     [CM_SIDR_REQ_COUNTER]);
		goto out; /* Duplicate message. */
	}
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   cm_id_priv->id.service_id);
	if (!listen_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		ib_send_cm_sidr_rep(&cm_id_priv->id,
				    &(struct ib_cm_sidr_rep_param){
					    .status = IB_SIDR_UNSUPPORTED });
		goto out; /* No match. */
	}
	spin_unlock_irq(&cm.lock);

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;

	/*
	 * A SIDR ID does not need to be in the xarray since it does not receive
	 * mads, is not placed in the remote_id or remote_qpn rbtree, and does
	 * not enter timewait.
	 */

	cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);
	/*
	 * A pointer to the listen_cm_id is held in the event, so this deref
	 * must be after the event is delivered above.
	 */
	cm_deref_id(listen_cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}
static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			      cm_id_priv->tid, param->ece.attr_mod);
3641 IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
3642 be32_to_cpu(cm_id_priv->id.remote_id));
3643 IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
3644 IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num);
3645 IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
3646 be64_to_cpu(cm_id_priv->id.service_id));
3647 IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
3648 IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg,
3649 param->ece.vendor_id & 0xFF);
3650 IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg,
3651 (param->ece.vendor_id >> 8) & 0xFF);
3653 if (param->info && param->info_length)
3654 IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
3655 param->info, param->info_length);
	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg,
			    param->private_data, param->private_data_len);
}
static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
				   struct ib_cm_sidr_rep_param *param)
{
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
		return -EINVAL;

	msg = cm_alloc_msg(cm_id_priv);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	trace_icm_send_sidr_rep(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);
		return ret;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	spin_lock_irqsave(&cm.lock, flags);
	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;
}
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_sidr_rep_locked(cm_id_priv, param);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
3714 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
static void cm_format_sidr_rep_event(struct cm_work *work,
				     const struct cm_id_private *cm_id_priv)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
3720 struct ib_cm_sidr_rep_event_param *param;
3722 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3723 work->mad_recv_wc->recv_buf.mad;
3724 param = &work->cm_event.param.sidr_rep_rcvd;
3725 param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg);
3726 param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg);
3727 param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg);
	param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION,
				      sidr_rep_msg);
	param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH,
				  sidr_rep_msg);
3732 param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg);
}
static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->msg);
	spin_unlock_irq(&cm_id_priv->lock);

	cm_format_sidr_rep_event(work, cm_id_priv);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
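
/*
 * MAD send-completion handling: a failed send is surfaced as the
 * matching *_ERROR event on the cm_id after the state machine has been
 * rolled back to idle or moved to timewait for the message that failed.
 */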
static void cm_process_send_error(struct cm_id_private *cm_id_priv,
				  struct ib_mad_send_buf *msg,
				  enum ib_cm_state state,
				  enum ib_wc_status wc_status)
{
	struct ib_cm_event cm_event = {};
	int ret;

	/* Discard old sends or ones without a response. */
	spin_lock_irq(&cm_id_priv->lock);
	if (msg != cm_id_priv->msg) {
		spin_unlock_irq(&cm_id_priv->lock);
		cm_free_priv_msg(msg);
		return;
	}
	cm_free_priv_msg(msg);

	if (state != cm_id_priv->id.state || wc_status == IB_WC_SUCCESS ||
	    wc_status == IB_WC_WR_FLUSH_ERR)
		goto out_unlock;

	trace_icm_mad_send_err(state, wc_status);
	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto out_unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
out_unlock:
	spin_unlock_irq(&cm_id_priv->lock);
}
static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
	struct cm_id_private *cm_id_priv = msg->context[0];
	enum ib_cm_state state =
		(enum ib_cm_state)(unsigned long)msg->context[1];
	struct cm_port *port;
	u16 attr_index;

	port = mad_agent->context;
	attr_index = be16_to_cpu(((struct ib_mad_hdr *)
				  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;

	/*
	 * If the send was in response to a received message (context[0] is not
	 * set to a cm_id), and is not a REJ, then it is a send that was
	 * manually retried.
	 */
	if (!cm_id_priv && (attr_index != CM_REJ_COUNTER))
		msg->retries = 1;

	atomic_long_add(1 + msg->retries, &port->counters[CM_XMIT][attr_index]);
	if (msg->retries)
		atomic_long_add(msg->retries,
				&port->counters[CM_XMIT_RETRIES][attr_index]);

	if (cm_id_priv)
		cm_process_send_error(cm_id_priv, msg, state,
				      mad_send_wc->status);
	else
		cm_free_response_msg(msg);
}
static void cm_work_handler(struct work_struct *_work)
{
	struct cm_work *work = container_of(_work, struct cm_work, work.work);
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		trace_icm_handler_err(work->cm_event.event);
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}
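
/*
 * ib_cm_notify(IB_EVENT_COMM_EST) path: the QP observed incoming data
 * before the RTU arrived, so the connection is promoted to established
 * without waiting for it.
 */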
static int cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id->device, &cm_client);
	if (!cm_dev)
		return -ENODEV;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		trace_icm_establish_err(cm_id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down) {
		queue_delayed_work(cm.wq, &work->work, 0);
	} else {
		kfree(work);
		ret = -ENODEV;
	}
	spin_unlock_irqrestore(&cm.lock, flags);

out:
	return ret;
}
static int cm_migrate(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_ESTABLISHED &&
	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
		cm_id->lap_state = IB_CM_LAP_IDLE;
		cm_id_priv->av = cm_id_priv->alt_av;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
	int ret;

	switch (event) {
	case IB_EVENT_COMM_EST:
		ret = cm_establish(cm_id);
		break;
	case IB_EVENT_PATH_MIG:
		ret = cm_migrate(cm_id);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
4010 EXPORT_SYMBOL(ib_cm_notify);
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_buf *send_buf,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_port *port = mad_agent->context;
	struct cm_work *work;
	enum ib_cm_event_type event;
	bool alt_path = false;
	u16 attr_id;
	int paths = 0;
	int going_down = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		alt_path = cm_req_has_alt_path((struct cm_req_msg *)
						mad_recv_wc->recv_buf.mad);
		paths = 1 + (alt_path != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
	atomic_long_inc(&port->counters[CM_RECV][attr_id - CM_ATTR_ID_OFFSET]);

	work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = port;

	/* Check if the device started its remove_one */
	spin_lock_irq(&cm.lock);
	if (!port->cm_dev->going_down)
		queue_delayed_work(cm.wq, &work->work, 0);
	else
		going_down = 1;
	spin_unlock_irq(&cm.lock);

	if (going_down) {
		kfree(work);
		ib_free_recv_mad(mad_recv_wc);
	}
}
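
/*
 * Helpers behind ib_cm_init_qp_attr(): derive the attribute mask and
 * values needed to move the QP through INIT, RTR, and RTS for the
 * current connection state.
 */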
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources) {
			struct ib_device *ib_dev = cm_id_priv->id.device;
			u64 support_flush = ib_dev->attrs.device_cap_flags &
			  (IB_DEVICE_FLUSH_GLOBAL | IB_DEVICE_FLUSH_PERSISTENT);
			u32 flushable = support_flush ?
					(IB_ACCESS_FLUSH_GLOBAL |
					 IB_ACCESS_FLUSH_PERSISTENT) : 0;

			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC |
						    flushable;
		}
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		if (cm_id_priv->av.port)
			qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		trace_icm_qp_init_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
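
/*
 * RTR requires the negotiated path: the address vector, path MTU,
 * destination QPN and expected receive PSN, plus responder resources
 * for RC/XRC targets and the alternate path when one was supplied.
 */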
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		if ((qp_attr->ah_attr.type == RDMA_AH_ATTR_TYPE_IB) &&
		    cm_id_priv->av.dlid_datapath &&
		    (cm_id_priv->av.dlid_datapath != 0xffff))
			qp_attr->ah_attr.ib.dlid = cm_id_priv->av.dlid_datapath;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC ||
		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr) &&
		    cm_id_priv->alt_av.port) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		trace_icm_qp_rtr_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
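
/*
 * RTS sets the send PSN and, for RC/XRC initiators, the retry budgets
 * and initiator depth.  After a LAP has been processed, the same call
 * instead loads the alternate path and rearms path migration.
 */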
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	/* Allow transition to RTS before sending REP */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			switch (cm_id_priv->qp_type) {
			case IB_QPT_RC:
			case IB_QPT_XRC_INI:
				*qp_attr_mask |= IB_QP_RETRY_CNT |
						 IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic =
						cm_id_priv->initiator_depth;
				fallthrough;
			case IB_QPT_XRC_TGT:
				*qp_attr_mask |= IB_QP_TIMEOUT;
				qp_attr->timeout = cm_id_priv->av.timeout;
				break;
			default:
				break;
			}
			if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			if (cm_id_priv->alt_av.port)
				qp_attr->alt_port_num =
					cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		trace_icm_qp_rts_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
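
/*
 * Dispatch on the QP state the consumer wants to reach and fill in the
 * attribute mask and values to pass to ib_modify_qp().
 */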
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
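
/*
 * Typical consumer usage is to walk the QP through INIT, RTR and RTS,
 * querying the CM before each transition.  A minimal sketch (error
 * handling omitted; "qp" and "cm_id" come from the consumer's own
 * setup):
 *
 *	struct ib_qp_attr qp_attr;
 *	int qp_attr_mask;
 *
 *	qp_attr.qp_state = IB_QPS_INIT;
 *	ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *
 *	qp_attr.qp_state = IB_QPS_RTR;
 *	ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *
 *	qp_attr.qp_state = IB_QPS_RTS;
 *	ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */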

static ssize_t cm_show_counter(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_attribute *attr, char *buf)
{
	struct cm_counter_attribute *cm_attr =
		container_of(attr, struct cm_counter_attribute, attr);
	struct cm_device *cm_dev = ib_get_client_data(ibdev, &cm_client);

	if (WARN_ON(!cm_dev))
		return -EINVAL;

	return sysfs_emit(buf, "%ld\n",
			  atomic_long_read(
				&cm_dev->port[port_num - 1]
					->counters[cm_attr->group][cm_attr->index]));
}
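
/*
 * The macros below stamp out one read-only sysfs attribute per CM
 * message type for each counter group, registered on every CM-capable
 * port.
 */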
#define CM_COUNTER_ATTR(_name, _group, _index) \
	{ \
		.attr = __ATTR(_name, 0444, cm_show_counter, NULL), \
		.group = _group, .index = _index \
	}

#define CM_COUNTER_GROUP(_group, _name) \
	static struct cm_counter_attribute cm_counter_attr_##_group[] = { \
		CM_COUNTER_ATTR(req, _group, CM_REQ_COUNTER), \
		CM_COUNTER_ATTR(mra, _group, CM_MRA_COUNTER), \
		CM_COUNTER_ATTR(rej, _group, CM_REJ_COUNTER), \
		CM_COUNTER_ATTR(rep, _group, CM_REP_COUNTER), \
		CM_COUNTER_ATTR(rtu, _group, CM_RTU_COUNTER), \
		CM_COUNTER_ATTR(dreq, _group, CM_DREQ_COUNTER), \
		CM_COUNTER_ATTR(drep, _group, CM_DREP_COUNTER), \
		CM_COUNTER_ATTR(sidr_req, _group, CM_SIDR_REQ_COUNTER), \
		CM_COUNTER_ATTR(sidr_rep, _group, CM_SIDR_REP_COUNTER), \
		CM_COUNTER_ATTR(lap, _group, CM_LAP_COUNTER), \
		CM_COUNTER_ATTR(apr, _group, CM_APR_COUNTER), \
	}; \
	static struct attribute *cm_counter_attrs_##_group[] = { \
		&cm_counter_attr_##_group[0].attr.attr, \
		&cm_counter_attr_##_group[1].attr.attr, \
		&cm_counter_attr_##_group[2].attr.attr, \
		&cm_counter_attr_##_group[3].attr.attr, \
		&cm_counter_attr_##_group[4].attr.attr, \
		&cm_counter_attr_##_group[5].attr.attr, \
		&cm_counter_attr_##_group[6].attr.attr, \
		&cm_counter_attr_##_group[7].attr.attr, \
		&cm_counter_attr_##_group[8].attr.attr, \
		&cm_counter_attr_##_group[9].attr.attr, \
		&cm_counter_attr_##_group[10].attr.attr, \
		NULL, \
	}; \
	static const struct attribute_group cm_counter_group_##_group = { \
		.name = _name, \
		.attrs = cm_counter_attrs_##_group, \
	};

CM_COUNTER_GROUP(CM_XMIT, "cm_tx_msgs")
CM_COUNTER_GROUP(CM_XMIT_RETRIES, "cm_tx_retries")
CM_COUNTER_GROUP(CM_RECV, "cm_rx_msgs")
CM_COUNTER_GROUP(CM_RECV_DUPLICATES, "cm_rx_duplicates")

static const struct attribute_group *cm_counter_groups[] = {
	&cm_counter_group_CM_XMIT,
	&cm_counter_group_CM_XMIT_RETRIES,
	&cm_counter_group_CM_RECV,
	&cm_counter_group_CM_RECV_DUPLICATES,
	NULL,
};
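
/*
 * Client add callback: allocate a cm_device, then for every CM-capable
 * port register the counter sysfs groups and a GSI MAD agent, and
 * advertise IB_PORT_CM_SUP.  All of it is unwound on failure.
 */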
static int cm_add_one(struct ib_device *ib_device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION,
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	int count = 0;
	u32 i;

	cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
			 GFP_KERNEL);
	if (!cm_dev)
		return -ENOMEM;

	kref_init(&cm_dev->kref);
	spin_lock_init(&cm_dev->mad_agent_lock);
	cm_dev->ib_device = ib_device;
	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
	cm_dev->going_down = 0;

	ib_set_client_data(ib_device, &cm_client, cm_dev);

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	rdma_for_each_port (ib_device, i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = kzalloc(sizeof *port, GFP_KERNEL);
		if (!port) {
			ret = -ENOMEM;
			goto error1;
		}

		cm_dev->port[i-1] = port;
		port->cm_dev = cm_dev;
		port->port_num = i;

		ret = ib_port_register_client_groups(ib_device, i,
						     cm_counter_groups);
		if (ret)
			goto error1;

		port->mad_agent = ib_register_mad_agent(ib_device, i,
							IB_QPT_GSI, &reg_req, 0,
							cm_send_handler,
							cm_recv_handler,
							port, 0);
		if (IS_ERR(port->mad_agent)) {
			ret = PTR_ERR(port->mad_agent);
			goto error2;
		}

		ret = ib_modify_port(ib_device, i, 0, &port_modify);
		if (ret)
			goto error3;

		count++;
	}

	if (!count) {
		ret = -EOPNOTSUPP;
		goto free;
	}

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return 0;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	ib_port_unregister_client_groups(ib_device, i, cm_counter_groups);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		ib_port_unregister_client_groups(ib_device, i,
						 cm_counter_groups);
	}
free:
	cm_device_put(cm_dev);
	return ret;
}
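
/*
 * Client remove callback: mark the device as going down so the receive
 * handler stops queueing work, then tear each port down in an order
 * that cannot race with work items still touching the MAD agent.
 */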
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct cm_device *cm_dev = client_data;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	u32 i;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	spin_lock_irq(&cm.lock);
	cm_dev->going_down = 1;
	spin_unlock_irq(&cm.lock);

	rdma_for_each_port (ib_device, i) {
		struct ib_mad_agent *mad_agent;

		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		mad_agent = port->mad_agent;
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		/*
		 * Flush the queue only after going_down is set; this
		 * guarantees that the recv handler queues no new work,
		 * after which the MAD agent can be unregistered.
		 */
		flush_workqueue(cm.wq);
		/*
		 * The above ensures no call paths from the work are running,
		 * the remaining paths all take the mad_agent_lock.
		 */
		spin_lock(&cm_dev->mad_agent_lock);
		port->mad_agent = NULL;
		spin_unlock(&cm_dev->mad_agent_lock);
		ib_unregister_mad_agent(mad_agent);
		ib_port_unregister_client_groups(ib_device, i,
						 cm_counter_groups);
	}

	cm_device_put(cm_dev);
}
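
/*
 * Module init: set up the global CM tables and locks and create the
 * "ib_cm" workqueue (max_active limited to 1) before registering as an
 * IB client.
 */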
static int __init ib_cm_init(void)
{
	int ret;

	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	INIT_LIST_HEAD(&cm.timewait_list);

	cm.wq = alloc_workqueue("ib_cm", 0, 1);
	if (!cm.wq) {
		ret = -ENOMEM;
		goto error2;
	}

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error3;

	return 0;
error3:
	destroy_workqueue(cm.wq);
error2:
	return ret;
}
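
/*
 * Module exit: cancel any pending timewait work, unregister the IB
 * client, destroy the workqueue, and free whatever timewait entries
 * are still on the list.
 */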
static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	WARN_ON(!xa_empty(&cm.local_id_table));
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);