/*
 * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};
static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
} cm;
/* Counter indexes ordered by attribute ID */
enum {
	CM_REQ_COUNTER,
	CM_MRA_COUNTER,
	CM_REJ_COUNTER,
	CM_REP_COUNTER,
	CM_RTU_COUNTER,
	CM_DREQ_COUNTER,
	CM_DREP_COUNTER,
	CM_SIDR_REQ_COUNTER,
	CM_SIDR_REP_COUNTER,
	CM_LAP_COUNTER,
	CM_APR_COUNTER,
	CM_ATTR_COUNT,
	CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
	CM_XMIT,
	CM_XMIT_RETRIES,
	CM_RECV,
	CM_RECV_DUPLICATES,
	CM_COUNTER_GROUPS
};
static char const counter_group_names[CM_COUNTER_GROUPS]
				     [sizeof("cm_rx_duplicates")] = {
	"cm_tx_msgs", "cm_tx_retries",
	"cm_rx_msgs", "cm_rx_duplicates"
};
struct cm_counter_group {
	struct kobject obj;
	atomic_long_t counter[CM_ATTR_COUNT];
};
struct cm_counter_attribute {
	struct attribute attr;
	int index;
};
#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.index = _index \
}
static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);

static struct attribute *cm_counter_default_attrs[] = {
	&cm_req_counter_attr.attr,
	&cm_mra_counter_attr.attr,
	&cm_rej_counter_attr.attr,
	&cm_rep_counter_attr.attr,
	&cm_rtu_counter_attr.attr,
	&cm_dreq_counter_attr.attr,
	&cm_drep_counter_attr.attr,
	&cm_sidr_req_counter_attr.attr,
	&cm_sidr_rep_counter_attr.attr,
	&cm_lap_counter_attr.attr,
	&cm_apr_counter_attr.attr,
	NULL
};
struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	struct kobject port_obj;
	u8 port_num;
	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};

struct cm_device {
	struct list_head list;
	struct ib_device *ib_device;
	struct device *device;
	u8 ack_delay;
	int going_down;
	struct cm_port *port[0];
};
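
/*
 * port[] above is a zero-length (flexible) array: the cm_device and one
 * cm_port pointer per physical port are carved out of a single
 * allocation when the device is added, so the array is sized at runtime.
 */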

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 timeout;
	u8 valid;
	u8 smac[ETH_ALEN];
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;
	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
	 * Protected by the cm.lock spinlock. */
	int listen_sharecount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;
	u8 target_ack_delay;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
			   grh, &av->ah_attr);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
					&p, NULL)) {
			port = cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
			     &av->ah_attr);
	av->timeout = path->packet_life_time + 1;
	memcpy(av->smac, path->smac, sizeof(av->smac));

	av->valid = 1;
	return 0;
}

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&cm.lock, flags);

	id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&cm.lock, flags);
	idr_preload_end();

	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
	return id < 0 ? id : 0;
}
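
/*
 * Local IDs handed out on the wire are the idr index XORed with a random
 * operand, which makes it harder for a remote node to guess which
 * communication IDs are active; cm_free_id() and cm_get_id() below undo
 * the same XOR to recover the idr index.
 */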
static void cm_free_id(__be32 local_id)
{
	spin_lock_irq(&cm.lock);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irq(&cm.lock);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	spin_lock_irq(&cm.lock);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irq(&cm.lock);

	return cm_id_priv;
}

/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
	return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
	return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
	return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
	return (__force u64) a > (__force u64) b;
}

static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}

static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device))
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}
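
/*
 * cm_convert_to_ms() example: iba_time = 8 means 4.096us * 2^8 ~= 1ms,
 * returned as 1 << 0 = 1; iba_time = 18 means ~1.07s, returned as
 * 1 << 10 = 1024ms.  Values below 8 all round up to 1ms.
 */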

/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
}
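
/*
 * cm_ack_timeout() example: ca_ack_delay = 15 and packet_life_time = 14
 * give ack_timeout = 15; since ca_ack_delay (15) is within one of that,
 * the result rounds up to 16, i.e. the stored timeout doubles.
 */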

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
	if (!cm_dev)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

	/* Check if the device started its remove_one */
	spin_lock_irq(&cm.lock);
	if (!cm_dev->going_down)
		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
				   msecs_to_jiffies(wait_time));
	spin_unlock_irq(&cm.lock);

	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		spin_unlock_irq(&cm_id_priv->lock);

		spin_lock_irq(&cm.lock);
		if (--cm_id_priv->listen_sharecount > 0) {
			/* The id is still shared. */
			cm_deref_id(cm_id_priv);
			spin_unlock_irq(&cm.lock);
			return;
		}
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		spin_lock_irq(&cm.lock);
		if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
			rb_erase(&cm_id_priv->sidr_id_node,
				 &cm.remote_sidr_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->id.device->node_guid,
			       sizeof cm_id_priv->id.device->node_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irq(&cm_id_priv->lock);
		} else {
			spin_unlock_irq(&cm_id_priv->lock);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irq(&cm_id_priv->lock);
		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
			break;
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	}

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

/**
 * __ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 * @service_mask: Mask applied to service ID used to listen across a
 *   range of service IDs.  If set to 0, the service ID is matched
 *   exactly.  This parameter is ignored if %service_id is set to
 *   IB_CM_ASSIGN_SERVICE_ID.
 */
static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
			  __be64 service_mask)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	int ret = 0;

	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	cm_id->state = IB_CM_LISTEN;
	++cm_id_priv->listen_sharecount;

	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = ~cpu_to_be64(0);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		--cm_id_priv->listen_sharecount;
		ret = -EBUSY;
	}
	return ret;
}

int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm.lock, flags);
	ret = __ib_cm_listen(cm_id, service_id, service_mask);
	spin_unlock_irqrestore(&cm.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
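
/*
 * Usage sketch (a hypothetical consumer; my_handler, my_ctx and the
 * service ID value are illustrative only, and error handling is elided):
 *
 *	struct ib_cm_id *id;
 *
 *	id = ib_create_cm_id(device, my_handler, my_ctx);
 *	ib_cm_listen(id, cpu_to_be64(0x1000ULL), 0);
 *	...
 *	ib_destroy_cm_id(id);
 */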

/**
 * Create a new listening ib_cm_id and listen on the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id.  All related communication will
 * be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
				     ib_cm_handler cm_handler,
				     __be64 service_id)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_id *cm_id;
	unsigned long flags;
	int err = 0;

	/* Create an ID in advance, since the creation may sleep */
	cm_id = ib_create_cm_id(device, cm_handler, NULL);
	if (IS_ERR(cm_id))
		return cm_id;

	spin_lock_irqsave(&cm.lock, flags);

	if (service_id == IB_CM_ASSIGN_SERVICE_ID)
		goto new_id;

	/* Find an existing ID */
	cm_id_priv = cm_find_listen(device, service_id);
	if (cm_id_priv) {
		if (cm_id->cm_handler != cm_handler || cm_id->context) {
			/* Sharing an ib_cm_id with different handlers is not
			 * supported */
			spin_unlock_irqrestore(&cm.lock, flags);
			return ERR_PTR(-EINVAL);
		}
		atomic_inc(&cm_id_priv->refcount);
		++cm_id_priv->listen_sharecount;
		spin_unlock_irqrestore(&cm.lock, flags);

		ib_destroy_cm_id(cm_id);
		cm_id = &cm_id_priv->id;
		return cm_id;
	}

new_id:
	/* Use newly created ID */
	err = __ib_cm_listen(cm_id, service_id, 0);

	spin_unlock_irqrestore(&cm.lock, flags);

	if (err) {
		ib_destroy_cm_id(cm_id);
		return ERR_PTR(err);
	}
	return cm_id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);

static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid  = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
			 (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}
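
/*
 * Resulting TID layout: bits 63..32 carry the MAD agent's hi_tid (unique
 * per agent), and the low 32 bits carry the local communication ID with
 * the CM message sequence folded into its top two bits.
 */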

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class    = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method        = IB_MGMT_METHOD_SEND;
	hdr->attr_id       = attr_id;
	hdr->tid           = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	struct ib_sa_path_rec *pri_path = param->primary_path;
	struct ib_sa_path_rec *alt_path = param->alternate_path;

	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);

	if (param->qp_type != IB_QPT_XRC_INI) {
		cm_req_set_resp_res(req_msg, param->responder_resources);
		cm_req_set_retry_count(req_msg, param->retry_count);
		cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
		cm_req_set_srq(req_msg, param->srq);
	}

	if (pri_path->hop_limit <= 1) {
		req_msg->primary_local_lid = pri_path->slid;
		req_msg->primary_remote_lid = pri_path->dlid;
	} else {
		/* Work-around until there's a way to obtain remote LID info */
		req_msg->primary_local_lid = IB_LID_PERMISSIVE;
		req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
	}
	req_msg->primary_local_gid = pri_path->sgid;
	req_msg->primary_remote_gid = pri_path->dgid;
	cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
	req_msg->primary_traffic_class = pri_path->traffic_class;
	req_msg->primary_hop_limit = pri_path->hop_limit;
	cm_req_set_primary_sl(req_msg, pri_path->sl);
	cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
	cm_req_set_primary_local_ack_timeout(req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	if (alt_path) {
		if (alt_path->hop_limit <= 1) {
			req_msg->alt_local_lid = alt_path->slid;
			req_msg->alt_remote_lid = alt_path->dlid;
		} else {
			req_msg->alt_local_lid = IB_LID_PERMISSIVE;
			req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
		}
		req_msg->alt_local_gid = alt_path->sgid;
		req_msg->alt_remote_gid = alt_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  alt_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
		req_msg->alt_traffic_class = alt_path->traffic_class;
		req_msg->alt_hop_limit = alt_path->hop_limit;
		cm_req_set_alt_sl(req_msg, alt_path->sl);
		cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
		cm_req_set_alt_local_ack_timeout(req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       alt_path->packet_life_time));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
	    param->qp_type != IB_QPT_XRC_INI)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}
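
/*
 * Deterministic tie-break for peer-to-peer connects: the side with the
 * numerically larger CA GUID (or, on a GUID tie, the larger QPN) is
 * treated as the active peer.
 */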
static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
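	/*
	 * The REQ carries local ACK timeout = packet life time + 1 (see
	 * cm_ack_timeout()); subtract one to recover the path's life time,
	 * clamping at zero.  The same applies to the alternate path below.
	 */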
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
	primary_path->service_id = req_msg->service_id;

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
		alt_path->service_id = req_msg->service_id;
	}
}

static u16 cm_get_bth_pkey(struct cm_work *work)
{
	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;
	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
	u16 pkey;
	int ret;

	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
	if (ret) {
		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
				     port_num, pkey_index, ret);
		return 0;
	}

	return pkey;
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
				cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
				cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irq(&cm_id_priv->lock);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REQ_COUNTER]);

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
}
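
/*
 * Match an incoming REQ against the listen tree.  Duplicate REQs are
 * answered by cm_dup_req_handler(); stale connections and unmatched
 * service IDs are rejected.  On success the matching listener is
 * returned with a reference held, otherwise NULL is returned.
 */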
static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for possible duplicate REQ. */
	spin_lock_irq(&cm.lock);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irq(&cm.lock);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Check for stale connections. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irq(&cm.lock);

	return listen_cm_id_priv;
}

/*
 * Work-around for inter-subnet connections.  If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
	if (!cm_req_get_primary_subnet_local(req_msg)) {
		if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
			req_msg->primary_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_primary_sl(req_msg, wc->sl);
		}

		if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
			req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}

	if (!cm_req_get_alt_subnet_local(req_msg)) {
		if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
			req_msg->alt_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_alt_sl(req_msg, wc->sl);
		}

		if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
			req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}
}

static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);

	memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
	work->path[0].vlan_id = cm_id_priv->av.ah_attr.vlan_id;
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret) {
		ib_get_cached_gid(work->port->cm_dev->ib_device,
				  work->port->port_num, 0, &work->path[0].sgid,
				  NULL);
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	cm_rep_set_target_ack_delay(rep_msg,
				    cm_id_priv->av.port->cm_dev->ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
		rep_msg->initiator_depth = param->initiator_depth;
		cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
		cm_rep_set_srq(rep_msg, param->srq);
		cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	} else {
		cm_rep_set_srq(rep_msg, 1);
		cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
	}

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}

static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REP_COUNTER]);
	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}

static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_format_rep_event(work, cm_id_priv->qp_type);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);

	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
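	/*
	 * av.timeout currently holds packet life time + 1; subtract one to
	 * recover the life time, then recombine it with the remote CA's ACK
	 * delay from the REP so the QP's ACK timeout covers both the path
	 * and the remote HCA.  The alternate path is treated the same way.
	 */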
	cm_id_priv->av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->av.timeout - 1);
	cm_id_priv->alt_av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->alt_av.timeout - 1);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}

static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	/* See comment in cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_RTU_COUNTER]);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
2055 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2056 struct cm_id_private *cm_id_priv,
2057 const void *private_data,
2058 u8 private_data_len)
2059 {
2060 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2061 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
2062 dreq_msg->local_comm_id = cm_id_priv->id.local_id;
2063 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
2064 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
2066 if (private_data && private_data_len)
2067 memcpy(dreq_msg->private_data, private_data, private_data_len);
2068 }
2070 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
2071 const void *private_data,
2072 u8 private_data_len)
2073 {
2074 struct cm_id_private *cm_id_priv;
2075 struct ib_mad_send_buf *msg;
2076 unsigned long flags;
2077 int ret;
2079 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2080 return -EINVAL;
2082 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2083 spin_lock_irqsave(&cm_id_priv->lock, flags);
2084 if (cm_id->state != IB_CM_ESTABLISHED) {
2085 ret = -EINVAL;
2086 goto out;
2087 }
2089 if (cm_id->lap_state == IB_CM_LAP_SENT ||
2090 cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
2091 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2093 ret = cm_alloc_msg(cm_id_priv, &msg);
2094 if (ret) {
2095 cm_enter_timewait(cm_id_priv);
2096 goto out;
2097 }
2099 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2100 private_data, private_data_len);
2101 msg->timeout_ms = cm_id_priv->timeout_ms;
2102 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2104 ret = ib_post_send_mad(msg, NULL);
2105 if (ret) {
2106 cm_enter_timewait(cm_id_priv);
2107 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2108 cm_free_msg(msg);
2109 return ret;
2110 }
2112 cm_id->state = IB_CM_DREQ_SENT;
2113 cm_id_priv->msg = msg;
2114 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2115 return ret;
2116 }
2117 EXPORT_SYMBOL(ib_send_cm_dreq);
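/*
 * DREP formatting mirrors the DREQ path above, except that a DREP reuses
 * the transaction ID of the DREQ it answers (cm_id_priv->tid) rather than
 * forming a new one.
 */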
2119 static void cm_format_drep(struct cm_drep_msg *drep_msg,
2120 struct cm_id_private *cm_id_priv,
2121 const void *private_data,
2122 u8 private_data_len)
2123 {
2124 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2125 drep_msg->local_comm_id = cm_id_priv->id.local_id;
2126 drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
2128 if (private_data && private_data_len)
2129 memcpy(drep_msg->private_data, private_data, private_data_len);
2130 }
2132 int ib_send_cm_drep(struct ib_cm_id *cm_id,
2133 const void *private_data,
2134 u8 private_data_len)
2135 {
2136 struct cm_id_private *cm_id_priv;
2137 struct ib_mad_send_buf *msg;
2138 unsigned long flags;
2139 void *data;
2140 int ret;
2142 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2143 return -EINVAL;
2145 data = cm_copy_private_data(private_data, private_data_len);
2146 if (IS_ERR(data))
2147 return PTR_ERR(data);
2149 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2150 spin_lock_irqsave(&cm_id_priv->lock, flags);
2151 if (cm_id->state != IB_CM_DREQ_RCVD) {
2152 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2153 kfree(data);
2154 return -EINVAL;
2155 }
2157 cm_set_private_data(cm_id_priv, data, private_data_len);
2158 cm_enter_timewait(cm_id_priv);
2160 ret = cm_alloc_msg(cm_id_priv, &msg);
2161 if (ret)
2162 goto out;
2164 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2165 private_data, private_data_len);
2167 ret = ib_post_send_mad(msg, NULL);
2168 if (ret) {
2169 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2170 cm_free_msg(msg);
2171 return ret;
2172 }
2174 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2175 return ret;
2176 }
2177 EXPORT_SYMBOL(ib_send_cm_drep);
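/*
 * Build and send a DREP straight from the receive context when a DREQ
 * arrives for a connection this CM no longer tracks, so the remote side
 * stops retrying.
 */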
2179 static int cm_issue_drep(struct cm_port *port,
2180 struct ib_mad_recv_wc *mad_recv_wc)
2181 {
2182 struct ib_mad_send_buf *msg = NULL;
2183 struct cm_dreq_msg *dreq_msg;
2184 struct cm_drep_msg *drep_msg;
2185 int ret;
2187 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2188 if (ret)
2189 return ret;
2191 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2192 drep_msg = (struct cm_drep_msg *) msg->mad;
2194 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2195 drep_msg->remote_comm_id = dreq_msg->local_comm_id;
2196 drep_msg->local_comm_id = dreq_msg->remote_comm_id;
2198 ret = ib_post_send_mad(msg, NULL);
2199 if (ret)
2200 cm_free_msg(msg);
2202 return ret;
2203 }
2205 static int cm_dreq_handler(struct cm_work *work)
2206 {
2207 struct cm_id_private *cm_id_priv;
2208 struct cm_dreq_msg *dreq_msg;
2209 struct ib_mad_send_buf *msg = NULL;
2210 int ret;
2212 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2213 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
2214 dreq_msg->local_comm_id);
2215 if (!cm_id_priv) {
2216 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2217 counter[CM_DREQ_COUNTER]);
2218 cm_issue_drep(work->port, work->mad_recv_wc);
2219 return -EINVAL;
2220 }
2222 work->cm_event.private_data = &dreq_msg->private_data;
2224 spin_lock_irq(&cm_id_priv->lock);
2225 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
2226 goto unlock;
2228 switch (cm_id_priv->id.state) {
2229 case IB_CM_REP_SENT:
2230 case IB_CM_DREQ_SENT:
2231 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2232 break;
2233 case IB_CM_ESTABLISHED:
2234 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2235 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2236 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2237 break;
2238 case IB_CM_MRA_REP_RCVD:
2239 break;
2240 case IB_CM_TIMEWAIT:
2241 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2242 counter[CM_DREQ_COUNTER]);
2243 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2244 goto unlock;
2246 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2247 cm_id_priv->private_data,
2248 cm_id_priv->private_data_len);
2249 spin_unlock_irq(&cm_id_priv->lock);
2251 if (ib_post_send_mad(msg, NULL))
2252 cm_free_msg(msg);
2253 goto deref;
2254 case IB_CM_DREQ_RCVD:
2255 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2256 counter[CM_DREQ_COUNTER]);
2257 goto unlock;
2258 default:
2259 goto unlock;
2260 }
2261 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2262 cm_id_priv->tid = dreq_msg->hdr.tid;
2263 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2264 if (!ret)
2265 list_add_tail(&work->list, &cm_id_priv->work_list);
2266 spin_unlock_irq(&cm_id_priv->lock);
2268 if (ret)
2269 cm_process_work(cm_id_priv, work);
2270 else
2271 cm_deref_id(cm_id_priv);
2272 return 0;
2274 unlock: spin_unlock_irq(&cm_id_priv->lock);
2275 deref: cm_deref_id(cm_id_priv);
2276 return -EINVAL;
2277 }
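/*
 * A DREP (in DREQ_SENT or DREQ_RCVD) completes the disconnect: stop DREQ
 * retries, enter timewait, and report the event to the consumer.
 */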
2279 static int cm_drep_handler(struct cm_work *work)
2280 {
2281 struct cm_id_private *cm_id_priv;
2282 struct cm_drep_msg *drep_msg;
2283 int ret;
2285 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2286 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
2287 drep_msg->local_comm_id);
2288 if (!cm_id_priv)
2289 return -EINVAL;
2291 work->cm_event.private_data = &drep_msg->private_data;
2293 spin_lock_irq(&cm_id_priv->lock);
2294 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2295 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2296 spin_unlock_irq(&cm_id_priv->lock);
2297 goto out;
2298 }
2299 cm_enter_timewait(cm_id_priv);
2301 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2302 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2303 if (!ret)
2304 list_add_tail(&work->list, &cm_id_priv->work_list);
2305 spin_unlock_irq(&cm_id_priv->lock);
2307 if (ret)
2308 cm_process_work(cm_id_priv, work);
2309 else
2310 cm_deref_id(cm_id_priv);
2311 return 0;
2312 out:
2313 cm_deref_id(cm_id_priv);
2314 return -EINVAL;
2315 }
2317 int ib_send_cm_rej(struct ib_cm_id *cm_id,
2318 enum ib_cm_rej_reason reason,
2319 void *ari,
2320 u8 ari_length,
2321 const void *private_data,
2322 u8 private_data_len)
2323 {
2324 struct cm_id_private *cm_id_priv;
2325 struct ib_mad_send_buf *msg;
2326 unsigned long flags;
2327 int ret;
2329 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2330 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2331 return -EINVAL;
2333 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2335 spin_lock_irqsave(&cm_id_priv->lock, flags);
2336 switch (cm_id->state) {
2337 case IB_CM_REQ_SENT:
2338 case IB_CM_MRA_REQ_RCVD:
2339 case IB_CM_REQ_RCVD:
2340 case IB_CM_MRA_REQ_SENT:
2341 case IB_CM_REP_RCVD:
2342 case IB_CM_MRA_REP_SENT:
2343 ret = cm_alloc_msg(cm_id_priv, &msg);
2344 if (!ret)
2345 cm_format_rej((struct cm_rej_msg *) msg->mad,
2346 cm_id_priv, reason, ari, ari_length,
2347 private_data, private_data_len);
2349 cm_reset_to_idle(cm_id_priv);
2350 break;
2351 case IB_CM_REP_SENT:
2352 case IB_CM_MRA_REP_RCVD:
2353 ret = cm_alloc_msg(cm_id_priv, &msg);
2354 if (!ret)
2355 cm_format_rej((struct cm_rej_msg *) msg->mad,
2356 cm_id_priv, reason, ari, ari_length,
2357 private_data, private_data_len);
2359 cm_enter_timewait(cm_id_priv);
2360 break;
2361 default:
2362 ret = -EINVAL;
2363 goto out;
2364 }
2366 if (ret)
2367 goto out;
2369 ret = ib_post_send_mad(msg, NULL);
2370 if (ret)
2371 cm_free_msg(msg);
2373 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2374 return ret;
2375 }
2376 EXPORT_SYMBOL(ib_send_cm_rej);
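/*
 * REJ handling: the reject may refer to a REQ we sent, to a stale
 * connection found through the ARI, or to a live id; see
 * cm_acquire_rejected_id() below for how each case is matched.
 */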
2378 static void cm_format_rej_event(struct cm_work *work)
2379 {
2380 struct cm_rej_msg *rej_msg;
2381 struct ib_cm_rej_event_param *param;
2383 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2384 param = &work->cm_event.param.rej_rcvd;
2385 param->ari = rej_msg->ari;
2386 param->ari_length = cm_rej_get_reject_info_len(rej_msg);
2387 param->reason = __be16_to_cpu(rej_msg->reason);
2388 work->cm_event.private_data = &rej_msg->private_data;
2389 }
2391 static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2392 {
2393 struct cm_timewait_info *timewait_info;
2394 struct cm_id_private *cm_id_priv;
2395 __be32 remote_id;
2397 remote_id = rej_msg->local_comm_id;
2399 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
2400 spin_lock_irq(&cm.lock);
2401 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
2402 remote_id);
2403 if (!timewait_info) {
2404 spin_unlock_irq(&cm.lock);
2405 return NULL;
2406 }
2407 cm_id_priv = idr_find(&cm.local_id_table, (__force int)
2408 (timewait_info->work.local_id ^
2409 cm.random_id_operand));
2410 if (cm_id_priv) {
2411 if (cm_id_priv->id.remote_id == remote_id)
2412 atomic_inc(&cm_id_priv->refcount);
2413 else
2414 cm_id_priv = NULL;
2415 }
2416 spin_unlock_irq(&cm.lock);
2417 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2418 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2419 else
2420 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2422 return cm_id_priv;
2423 }
2425 static int cm_rej_handler(struct cm_work *work)
2426 {
2427 struct cm_id_private *cm_id_priv;
2428 struct cm_rej_msg *rej_msg;
2429 int ret;
2431 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2432 cm_id_priv = cm_acquire_rejected_id(rej_msg);
2433 if (!cm_id_priv)
2434 return -EINVAL;
2436 cm_format_rej_event(work);
2438 spin_lock_irq(&cm_id_priv->lock);
2439 switch (cm_id_priv->id.state) {
2440 case IB_CM_REQ_SENT:
2441 case IB_CM_MRA_REQ_RCVD:
2442 case IB_CM_REP_SENT:
2443 case IB_CM_MRA_REP_RCVD:
2444 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2445 /* fall through */
2446 case IB_CM_REQ_RCVD:
2447 case IB_CM_MRA_REQ_SENT:
2448 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2449 cm_enter_timewait(cm_id_priv);
2450 else
2451 cm_reset_to_idle(cm_id_priv);
2452 break;
2453 case IB_CM_DREQ_SENT:
2454 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2455 /* fall through */
2456 case IB_CM_REP_RCVD:
2457 case IB_CM_MRA_REP_SENT:
2458 cm_enter_timewait(cm_id_priv);
2459 break;
2460 case IB_CM_ESTABLISHED:
2461 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
2462 cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
2463 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
2464 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2465 cm_id_priv->msg);
2466 cm_enter_timewait(cm_id_priv);
2467 break;
2468 }
2469 /* fall through */
2470 default:
2471 spin_unlock_irq(&cm_id_priv->lock);
2472 ret = -EINVAL;
2473 goto out;
2474 }
2476 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2477 if (!ret)
2478 list_add_tail(&work->list, &cm_id_priv->work_list);
2479 spin_unlock_irq(&cm_id_priv->lock);
2481 if (ret)
2482 cm_process_work(cm_id_priv, work);
2483 else
2484 cm_deref_id(cm_id_priv);
2485 return 0;
2486 out:
2487 cm_deref_id(cm_id_priv);
2488 return -EINVAL;
2489 }
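/*
 * Send an MRA for the REQ, REP, or LAP currently being processed. If
 * IB_CM_MRA_FLAG_DELAY is set in service_timeout, only the timeout and
 * private data are recorded here and no MRA goes on the wire.
 */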
2491 int ib_send_cm_mra(struct ib_cm_id *cm_id,
2492 u8 service_timeout,
2493 const void *private_data,
2494 u8 private_data_len)
2495 {
2496 struct cm_id_private *cm_id_priv;
2497 struct ib_mad_send_buf *msg;
2498 enum ib_cm_state cm_state;
2499 enum ib_cm_lap_state lap_state;
2500 enum cm_msg_response msg_response;
2501 void *data;
2502 unsigned long flags;
2503 int ret;
2505 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2506 return -EINVAL;
2508 data = cm_copy_private_data(private_data, private_data_len);
2509 if (IS_ERR(data))
2510 return PTR_ERR(data);
2512 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2514 spin_lock_irqsave(&cm_id_priv->lock, flags);
2515 switch(cm_id_priv->id.state) {
2516 case IB_CM_REQ_RCVD:
2517 cm_state = IB_CM_MRA_REQ_SENT;
2518 lap_state = cm_id->lap_state;
2519 msg_response = CM_MSG_RESPONSE_REQ;
2520 break;
2521 case IB_CM_REP_RCVD:
2522 cm_state = IB_CM_MRA_REP_SENT;
2523 lap_state = cm_id->lap_state;
2524 msg_response = CM_MSG_RESPONSE_REP;
2525 break;
2526 case IB_CM_ESTABLISHED:
2527 if (cm_id->lap_state == IB_CM_LAP_RCVD) {
2528 cm_state = cm_id->state;
2529 lap_state = IB_CM_MRA_LAP_SENT;
2530 msg_response = CM_MSG_RESPONSE_OTHER;
2531 break;
2532 }
2533 /* fall through */
2534 default:
2535 ret = -EINVAL;
2536 goto error1;
2537 }
2538 if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
2539 ret = cm_alloc_msg(cm_id_priv, &msg);
2540 if (ret)
2541 goto error1;
2543 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2544 msg_response, service_timeout,
2545 private_data, private_data_len);
2546 ret = ib_post_send_mad(msg, NULL);
2547 if (ret)
2548 goto error2;
2549 }
2551 cm_id->state = cm_state;
2552 cm_id->lap_state = lap_state;
2553 cm_id_priv->service_timeout = service_timeout;
2554 cm_set_private_data(cm_id_priv, data, private_data_len);
2555 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2556 return 0;
2558 error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2559 kfree(data);
2560 return ret;
2562 error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2563 kfree(data);
2564 cm_free_msg(msg);
2565 return ret;
2566 }
2567 EXPORT_SYMBOL(ib_send_cm_mra);
2569 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2570 {
2571 switch (cm_mra_get_msg_mraed(mra_msg)) {
2572 case CM_MSG_RESPONSE_REQ:
2573 return cm_acquire_id(mra_msg->remote_comm_id, 0);
2574 case CM_MSG_RESPONSE_REP:
2575 case CM_MSG_RESPONSE_OTHER:
2576 return cm_acquire_id(mra_msg->remote_comm_id,
2577 mra_msg->local_comm_id);
2578 default:
2579 return NULL;
2580 }
2581 }
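/*
 * On MRA receipt, extend the retry timeout of the matching outstanding
 * REQ, REP, or LAP via ib_modify_mad() instead of letting it expire at
 * the original pace.
 */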
2583 static int cm_mra_handler(struct cm_work *work)
2584 {
2585 struct cm_id_private *cm_id_priv;
2586 struct cm_mra_msg *mra_msg;
2587 int timeout, ret;
2589 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2590 cm_id_priv = cm_acquire_mraed_id(mra_msg);
2591 if (!cm_id_priv)
2592 return -EINVAL;
2594 work->cm_event.private_data = &mra_msg->private_data;
2595 work->cm_event.param.mra_rcvd.service_timeout =
2596 cm_mra_get_service_timeout(mra_msg);
2597 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2598 cm_convert_to_ms(cm_id_priv->av.timeout);
2600 spin_lock_irq(&cm_id_priv->lock);
2601 switch (cm_id_priv->id.state) {
2602 case IB_CM_REQ_SENT:
2603 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2604 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2605 cm_id_priv->msg, timeout))
2606 goto out;
2607 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2608 break;
2609 case IB_CM_REP_SENT:
2610 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2611 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2612 cm_id_priv->msg, timeout))
2613 goto out;
2614 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2615 break;
2616 case IB_CM_ESTABLISHED:
2617 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2618 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2619 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2620 cm_id_priv->msg, timeout)) {
2621 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2622 atomic_long_inc(&work->port->
2623 counter_group[CM_RECV_DUPLICATES].
2624 counter[CM_MRA_COUNTER]);
2625 goto out;
2626 }
2627 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2628 break;
2629 case IB_CM_MRA_REQ_RCVD:
2630 case IB_CM_MRA_REP_RCVD:
2631 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2632 counter[CM_MRA_COUNTER]);
2633 /* fall through */
2634 default:
2635 goto out;
2636 }
2638 cm_id_priv->msg->context[1] = (void *) (unsigned long)
2639 cm_id_priv->id.state;
2640 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2641 if (!ret)
2642 list_add_tail(&work->list, &cm_id_priv->work_list);
2643 spin_unlock_irq(&cm_id_priv->lock);
2645 if (ret)
2646 cm_process_work(cm_id_priv, work);
2647 else
2648 cm_deref_id(cm_id_priv);
2649 return 0;
2650 out:
2651 spin_unlock_irq(&cm_id_priv->lock);
2652 cm_deref_id(cm_id_priv);
2653 return -EINVAL;
2654 }
2656 static void cm_format_lap(struct cm_lap_msg *lap_msg,
2657 struct cm_id_private *cm_id_priv,
2658 struct ib_sa_path_rec *alternate_path,
2659 const void *private_data,
2660 u8 private_data_len)
2661 {
2662 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2663 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2664 lap_msg->local_comm_id = cm_id_priv->id.local_id;
2665 lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2666 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2667 /* todo: need remote CM response timeout */
2668 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2669 lap_msg->alt_local_lid = alternate_path->slid;
2670 lap_msg->alt_remote_lid = alternate_path->dlid;
2671 lap_msg->alt_local_gid = alternate_path->sgid;
2672 lap_msg->alt_remote_gid = alternate_path->dgid;
2673 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2674 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2675 lap_msg->alt_hop_limit = alternate_path->hop_limit;
2676 cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2677 cm_lap_set_sl(lap_msg, alternate_path->sl);
2678 cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
2679 cm_lap_set_local_ack_timeout(lap_msg,
2680 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
2681 alternate_path->packet_life_time));
2683 if (private_data && private_data_len)
2684 memcpy(lap_msg->private_data, private_data, private_data_len);
2685 }
2687 int ib_send_cm_lap(struct ib_cm_id *cm_id,
2688 struct ib_sa_path_rec *alternate_path,
2689 const void *private_data,
2690 u8 private_data_len)
2691 {
2692 struct cm_id_private *cm_id_priv;
2693 struct ib_mad_send_buf *msg;
2694 unsigned long flags;
2695 int ret;
2697 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2698 return -EINVAL;
2700 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2701 spin_lock_irqsave(&cm_id_priv->lock, flags);
2702 if (cm_id->state != IB_CM_ESTABLISHED ||
2703 (cm_id->lap_state != IB_CM_LAP_UNINIT &&
2704 cm_id->lap_state != IB_CM_LAP_IDLE)) {
2705 ret = -EINVAL;
2706 goto out;
2707 }
2709 ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
2710 if (ret)
2711 goto out;
2712 cm_id_priv->alt_av.timeout =
2713 cm_ack_timeout(cm_id_priv->target_ack_delay,
2714 cm_id_priv->alt_av.timeout - 1);
2716 ret = cm_alloc_msg(cm_id_priv, &msg);
2717 if (ret)
2718 goto out;
2720 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
2721 alternate_path, private_data, private_data_len);
2722 msg->timeout_ms = cm_id_priv->timeout_ms;
2723 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
2725 ret = ib_post_send_mad(msg, NULL);
2726 if (ret) {
2727 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2728 cm_free_msg(msg);
2729 return ret;
2730 }
2732 cm_id->lap_state = IB_CM_LAP_SENT;
2733 cm_id_priv->msg = msg;
2735 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2736 return ret;
2737 }
2738 EXPORT_SYMBOL(ib_send_cm_lap);
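/*
 * Rebuild the alternate path record from our point of view: the LAP's
 * "local" fields describe the sender, so they map to our remote
 * (dgid/dlid) fields and vice versa.
 */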
2740 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
2741 struct ib_sa_path_rec *path,
2742 struct cm_lap_msg *lap_msg)
2743 {
2744 memset(path, 0, sizeof *path);
2745 path->dgid = lap_msg->alt_local_gid;
2746 path->sgid = lap_msg->alt_remote_gid;
2747 path->dlid = lap_msg->alt_local_lid;
2748 path->slid = lap_msg->alt_remote_lid;
2749 path->flow_label = cm_lap_get_flow_label(lap_msg);
2750 path->hop_limit = lap_msg->alt_hop_limit;
2751 path->traffic_class = cm_lap_get_traffic_class(lap_msg);
2752 path->reversible = 1;
2753 path->pkey = cm_id_priv->pkey;
2754 path->sl = cm_lap_get_sl(lap_msg);
2755 path->mtu_selector = IB_SA_EQ;
2756 path->mtu = cm_id_priv->path_mtu;
2757 path->rate_selector = IB_SA_EQ;
2758 path->rate = cm_lap_get_packet_rate(lap_msg);
2759 path->packet_life_time_selector = IB_SA_EQ;
2760 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
2761 path->packet_life_time -= (path->packet_life_time > 0);
2762 }
2764 static int cm_lap_handler(struct cm_work *work)
2765 {
2766 struct cm_id_private *cm_id_priv;
2767 struct cm_lap_msg *lap_msg;
2768 struct ib_cm_lap_event_param *param;
2769 struct ib_mad_send_buf *msg = NULL;
2770 int ret;
2772 /* todo: verify LAP request and send reject APR if invalid. */
2773 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
2774 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
2775 lap_msg->local_comm_id);
2776 if (!cm_id_priv)
2777 return -EINVAL;
2779 param = &work->cm_event.param.lap_rcvd;
2780 param->alternate_path = &work->path[0];
2781 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
2782 work->cm_event.private_data = &lap_msg->private_data;
2784 spin_lock_irq(&cm_id_priv->lock);
2785 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
2786 goto unlock;
2788 switch (cm_id_priv->id.lap_state) {
2789 case IB_CM_LAP_UNINIT:
2790 case IB_CM_LAP_IDLE:
2791 break;
2792 case IB_CM_MRA_LAP_SENT:
2793 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2794 counter[CM_LAP_COUNTER]);
2795 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2796 goto unlock;
2798 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2799 CM_MSG_RESPONSE_OTHER,
2800 cm_id_priv->service_timeout,
2801 cm_id_priv->private_data,
2802 cm_id_priv->private_data_len);
2803 spin_unlock_irq(&cm_id_priv->lock);
2805 if (ib_post_send_mad(msg, NULL))
2806 cm_free_msg(msg);
2807 goto deref;
2808 case IB_CM_LAP_RCVD:
2809 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2810 counter[CM_LAP_COUNTER]);
2811 goto unlock;
2812 default:
2813 goto unlock;
2814 }
2816 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
2817 cm_id_priv->tid = lap_msg->hdr.tid;
2818 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2819 work->mad_recv_wc->recv_buf.grh,
2820 &cm_id_priv->av);
2821 cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
2822 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2823 if (!ret)
2824 list_add_tail(&work->list, &cm_id_priv->work_list);
2825 spin_unlock_irq(&cm_id_priv->lock);
2827 if (ret)
2828 cm_process_work(cm_id_priv, work);
2829 else
2830 cm_deref_id(cm_id_priv);
2831 return 0;
2833 unlock: spin_unlock_irq(&cm_id_priv->lock);
2834 deref: cm_deref_id(cm_id_priv);
2835 return -EINVAL;
2836 }
2838 static void cm_format_apr(struct cm_apr_msg *apr_msg,
2839 struct cm_id_private *cm_id_priv,
2840 enum ib_cm_apr_status status,
2841 void *info,
2842 u8 info_length,
2843 const void *private_data,
2844 u8 private_data_len)
2845 {
2846 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
2847 apr_msg->local_comm_id = cm_id_priv->id.local_id;
2848 apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
2849 apr_msg->ap_status = (u8) status;
2851 if (info && info_length) {
2852 apr_msg->info_length = info_length;
2853 memcpy(apr_msg->info, info, info_length);
2854 }
2856 if (private_data && private_data_len)
2857 memcpy(apr_msg->private_data, private_data, private_data_len);
2858 }
2860 int ib_send_cm_apr(struct ib_cm_id *cm_id,
2861 enum ib_cm_apr_status status,
2862 void *info,
2863 u8 info_length,
2864 const void *private_data,
2865 u8 private_data_len)
2866 {
2867 struct cm_id_private *cm_id_priv;
2868 struct ib_mad_send_buf *msg;
2869 unsigned long flags;
2870 int ret;
2872 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
2873 (info && info_length > IB_CM_APR_INFO_LENGTH))
2874 return -EINVAL;
2876 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2877 spin_lock_irqsave(&cm_id_priv->lock, flags);
2878 if (cm_id->state != IB_CM_ESTABLISHED ||
2879 (cm_id->lap_state != IB_CM_LAP_RCVD &&
2880 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
2881 ret = -EINVAL;
2882 goto out;
2883 }
2885 ret = cm_alloc_msg(cm_id_priv, &msg);
2886 if (ret)
2887 goto out;
2889 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
2890 info, info_length, private_data, private_data_len);
2891 ret = ib_post_send_mad(msg, NULL);
2892 if (ret) {
2893 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2894 cm_free_msg(msg);
2895 return ret;
2896 }
2898 cm_id->lap_state = IB_CM_LAP_IDLE;
2899 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2900 return ret;
2901 }
2902 EXPORT_SYMBOL(ib_send_cm_apr);
2904 static int cm_apr_handler(struct cm_work *work)
2905 {
2906 struct cm_id_private *cm_id_priv;
2907 struct cm_apr_msg *apr_msg;
2908 int ret;
2910 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
2911 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
2912 apr_msg->local_comm_id);
2913 if (!cm_id_priv)
2914 return -EINVAL; /* Unmatched reply. */
2916 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
2917 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
2918 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
2919 work->cm_event.private_data = &apr_msg->private_data;
2921 spin_lock_irq(&cm_id_priv->lock);
2922 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
2923 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
2924 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
2925 spin_unlock_irq(&cm_id_priv->lock);
2926 goto out;
2927 }
2928 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
2929 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2930 cm_id_priv->msg = NULL;
2932 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2933 if (!ret)
2934 list_add_tail(&work->list, &cm_id_priv->work_list);
2935 spin_unlock_irq(&cm_id_priv->lock);
2937 if (ret)
2938 cm_process_work(cm_id_priv, work);
2939 else
2940 cm_deref_id(cm_id_priv);
2941 return 0;
2942 out:
2943 cm_deref_id(cm_id_priv);
2944 return -EINVAL;
2945 }
2947 static int cm_timewait_handler(struct cm_work *work)
2948 {
2949 struct cm_timewait_info *timewait_info;
2950 struct cm_id_private *cm_id_priv;
2951 int ret;
2953 timewait_info = (struct cm_timewait_info *)work;
2954 spin_lock_irq(&cm.lock);
2955 list_del(&timewait_info->list);
2956 spin_unlock_irq(&cm.lock);
2958 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2959 timewait_info->work.remote_id);
2960 if (!cm_id_priv)
2961 return -EINVAL;
2963 spin_lock_irq(&cm_id_priv->lock);
2964 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
2965 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
2966 spin_unlock_irq(&cm_id_priv->lock);
2967 goto out;
2968 }
2969 cm_id_priv->id.state = IB_CM_IDLE;
2970 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2971 if (!ret)
2972 list_add_tail(&work->list, &cm_id_priv->work_list);
2973 spin_unlock_irq(&cm_id_priv->lock);
2975 if (ret)
2976 cm_process_work(cm_id_priv, work);
2977 else
2978 cm_deref_id(cm_id_priv);
2979 return 0;
2980 out:
2981 cm_deref_id(cm_id_priv);
2982 return -EINVAL;
2983 }
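/*
 * SIDR (service ID resolution) support: a single REQ/REP exchange reports
 * the QPN and Q_Key for a service ID, without establishing a connection.
 */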
2985 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
2986 struct cm_id_private *cm_id_priv,
2987 struct ib_cm_sidr_req_param *param)
2988 {
2989 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
2990 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
2991 sidr_req_msg->request_id = cm_id_priv->id.local_id;
2992 sidr_req_msg->pkey = param->path->pkey;
2993 sidr_req_msg->service_id = param->service_id;
2995 if (param->private_data && param->private_data_len)
2996 memcpy(sidr_req_msg->private_data, param->private_data,
2997 param->private_data_len);
2998 }
3000 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3001 struct ib_cm_sidr_req_param *param)
3002 {
3003 struct cm_id_private *cm_id_priv;
3004 struct ib_mad_send_buf *msg;
3005 unsigned long flags;
3006 int ret;
3008 if (!param->path || (param->private_data &&
3009 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3010 return -EINVAL;
3012 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3013 ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
3014 if (ret)
3015 goto out;
3017 cm_id->service_id = param->service_id;
3018 cm_id->service_mask = ~cpu_to_be64(0);
3019 cm_id_priv->timeout_ms = param->timeout_ms;
3020 cm_id_priv->max_cm_retries = param->max_cm_retries;
3021 ret = cm_alloc_msg(cm_id_priv, &msg);
3022 if (ret)
3023 goto out;
3025 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
3026 param);
3027 msg->timeout_ms = cm_id_priv->timeout_ms;
3028 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
3030 spin_lock_irqsave(&cm_id_priv->lock, flags);
3031 if (cm_id->state == IB_CM_IDLE)
3032 ret = ib_post_send_mad(msg, NULL);
3033 else
3034 ret = -EINVAL;
3036 if (ret) {
3037 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3038 cm_free_msg(msg);
3039 goto out;
3040 }
3041 cm_id->state = IB_CM_SIDR_REQ_SENT;
3042 cm_id_priv->msg = msg;
3043 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3044 out:
3045 return ret;
3046 }
3047 EXPORT_SYMBOL(ib_send_cm_sidr_req);
3049 static void cm_format_sidr_req_event(struct cm_work *work,
3050 struct ib_cm_id *listen_id)
3051 {
3052 struct cm_sidr_req_msg *sidr_req_msg;
3053 struct ib_cm_sidr_req_event_param *param;
3055 sidr_req_msg = (struct cm_sidr_req_msg *)
3056 work->mad_recv_wc->recv_buf.mad;
3057 param = &work->cm_event.param.sidr_req_rcvd;
3058 param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
3059 param->listen_id = listen_id;
3060 param->service_id = sidr_req_msg->service_id;
3061 param->bth_pkey = cm_get_bth_pkey(work);
3062 param->port = work->port->port_num;
3063 work->cm_event.private_data = &sidr_req_msg->private_data;
3064 }
3066 static int cm_sidr_req_handler(struct cm_work *work)
3067 {
3068 struct ib_cm_id *cm_id;
3069 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
3070 struct cm_sidr_req_msg *sidr_req_msg;
3071 struct ib_wc *wc;
3073 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
3074 if (IS_ERR(cm_id))
3075 return PTR_ERR(cm_id);
3076 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3078 /* Record SGID/SLID and request ID for lookup. */
3079 sidr_req_msg = (struct cm_sidr_req_msg *)
3080 work->mad_recv_wc->recv_buf.mad;
3081 wc = work->mad_recv_wc->wc;
3082 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
3083 cm_id_priv->av.dgid.global.interface_id = 0;
3084 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3085 work->mad_recv_wc->recv_buf.grh,
3086 &cm_id_priv->av);
3087 cm_id_priv->id.remote_id = sidr_req_msg->request_id;
3088 cm_id_priv->tid = sidr_req_msg->hdr.tid;
3089 atomic_inc(&cm_id_priv->work_count);
3091 spin_lock_irq(&cm.lock);
3092 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3093 if (cur_cm_id_priv) {
3094 spin_unlock_irq(&cm.lock);
3095 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3096 counter[CM_SIDR_REQ_COUNTER]);
3097 goto out; /* Duplicate message. */
3098 }
3099 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3100 cur_cm_id_priv = cm_find_listen(cm_id->device,
3101 sidr_req_msg->service_id);
3102 if (!cur_cm_id_priv) {
3103 spin_unlock_irq(&cm.lock);
3104 cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
3105 goto out; /* No match. */
3106 }
3107 atomic_inc(&cur_cm_id_priv->refcount);
3108 atomic_inc(&cm_id_priv->refcount);
3109 spin_unlock_irq(&cm.lock);
3111 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
3112 cm_id_priv->id.context = cur_cm_id_priv->id.context;
3113 cm_id_priv->id.service_id = sidr_req_msg->service_id;
3114 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3116 cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
3117 cm_process_work(cm_id_priv, work);
3118 cm_deref_id(cur_cm_id_priv);
3119 return 0;
3120 out:
3121 ib_destroy_cm_id(&cm_id_priv->id);
3122 return -EINVAL;
3123 }
3125 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3126 struct cm_id_private *cm_id_priv,
3127 struct ib_cm_sidr_rep_param *param)
3128 {
3129 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3130 cm_id_priv->tid);
3131 sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
3132 sidr_rep_msg->status = param->status;
3133 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
3134 sidr_rep_msg->service_id = cm_id_priv->id.service_id;
3135 sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
3137 if (param->info && param->info_length)
3138 memcpy(sidr_rep_msg->info, param->info, param->info_length);
3140 if (param->private_data && param->private_data_len)
3141 memcpy(sidr_rep_msg->private_data, param->private_data,
3142 param->private_data_len);
3143 }
3145 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3146 struct ib_cm_sidr_rep_param *param)
3147 {
3148 struct cm_id_private *cm_id_priv;
3149 struct ib_mad_send_buf *msg;
3150 unsigned long flags;
3151 int ret;
3153 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3154 (param->private_data &&
3155 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
3156 return -EINVAL;
3158 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3159 spin_lock_irqsave(&cm_id_priv->lock, flags);
3160 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
3161 ret = -EINVAL;
3162 goto error;
3163 }
3165 ret = cm_alloc_msg(cm_id_priv, &msg);
3166 if (ret)
3167 goto error;
3169 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
3170 param);
3171 ret = ib_post_send_mad(msg, NULL);
3172 if (ret) {
3173 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3174 cm_free_msg(msg);
3175 return ret;
3176 }
3177 cm_id->state = IB_CM_IDLE;
3178 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3180 spin_lock_irqsave(&cm.lock, flags);
3181 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3182 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3183 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3184 }
3185 spin_unlock_irqrestore(&cm.lock, flags);
3186 return 0;
3188 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3189 return ret;
3190 }
3191 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
3193 static void cm_format_sidr_rep_event(struct cm_work *work)
3194 {
3195 struct cm_sidr_rep_msg *sidr_rep_msg;
3196 struct ib_cm_sidr_rep_event_param *param;
3198 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3199 work->mad_recv_wc->recv_buf.mad;
3200 param = &work->cm_event.param.sidr_rep_rcvd;
3201 param->status = sidr_rep_msg->status;
3202 param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
3203 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
3204 param->info = &sidr_rep_msg->info;
3205 param->info_len = sidr_rep_msg->info_length;
3206 work->cm_event.private_data = &sidr_rep_msg->private_data;
3207 }
3209 static int cm_sidr_rep_handler(struct cm_work *work)
3210 {
3211 struct cm_sidr_rep_msg *sidr_rep_msg;
3212 struct cm_id_private *cm_id_priv;
3214 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3215 work->mad_recv_wc->recv_buf.mad;
3216 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
3217 if (!cm_id_priv)
3218 return -EINVAL; /* Unmatched reply. */
3220 spin_lock_irq(&cm_id_priv->lock);
3221 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
3222 spin_unlock_irq(&cm_id_priv->lock);
3223 goto out;
3224 }
3225 cm_id_priv->id.state = IB_CM_IDLE;
3226 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3227 spin_unlock_irq(&cm_id_priv->lock);
3229 cm_format_sidr_rep_event(work);
3230 cm_process_work(cm_id_priv, work);
3231 return 0;
3232 out:
3233 cm_deref_id(cm_id_priv);
3234 return -EINVAL;
3235 }
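/*
 * A MAD send completed in error: map the failed buffer back to the state
 * it was posted from (stashed in context[1]) and report the matching
 * IB_CM_*_ERROR event to the consumer.
 */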
3237 static void cm_process_send_error(struct ib_mad_send_buf *msg,
3238 enum ib_wc_status wc_status)
3239 {
3240 struct cm_id_private *cm_id_priv;
3241 struct ib_cm_event cm_event;
3242 enum ib_cm_state state;
3243 int ret;
3245 memset(&cm_event, 0, sizeof cm_event);
3246 cm_id_priv = msg->context[0];
3248 /* Discard old sends or ones without a response. */
3249 spin_lock_irq(&cm_id_priv->lock);
3250 state = (enum ib_cm_state) (unsigned long) msg->context[1];
3251 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
3252 goto discard;
3254 switch (state) {
3255 case IB_CM_REQ_SENT:
3256 case IB_CM_MRA_REQ_RCVD:
3257 cm_reset_to_idle(cm_id_priv);
3258 cm_event.event = IB_CM_REQ_ERROR;
3259 break;
3260 case IB_CM_REP_SENT:
3261 case IB_CM_MRA_REP_RCVD:
3262 cm_reset_to_idle(cm_id_priv);
3263 cm_event.event = IB_CM_REP_ERROR;
3264 break;
3265 case IB_CM_DREQ_SENT:
3266 cm_enter_timewait(cm_id_priv);
3267 cm_event.event = IB_CM_DREQ_ERROR;
3268 break;
3269 case IB_CM_SIDR_REQ_SENT:
3270 cm_id_priv->id.state = IB_CM_IDLE;
3271 cm_event.event = IB_CM_SIDR_REQ_ERROR;
3272 break;
3273 default:
3274 goto discard;
3275 }
3276 spin_unlock_irq(&cm_id_priv->lock);
3277 cm_event.param.send_status = wc_status;
3279 /* No other events can occur on the cm_id at this point. */
3280 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3281 cm_free_msg(msg);
3282 if (ret)
3283 ib_destroy_cm_id(&cm_id_priv->id);
3284 return;
3285 discard:
3286 spin_unlock_irq(&cm_id_priv->lock);
3287 cm_free_msg(msg);
3288 }
3290 static void cm_send_handler(struct ib_mad_agent *mad_agent,
3291 struct ib_mad_send_wc *mad_send_wc)
3292 {
3293 struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3294 struct cm_port *port;
3295 u16 attr_index;
3297 port = mad_agent->context;
3298 attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3299 msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3301 /*
3302 * If the send was in response to a received message (context[0] is not
3303 * set to a cm_id), and is not a REJ, then it is a send that was
3304 * manually retried.
3305 */
3306 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
3307 msg->retries = 1;
3309 atomic_long_add(1 + msg->retries,
3310 &port->counter_group[CM_XMIT].counter[attr_index]);
3311 if (msg->retries)
3312 atomic_long_add(msg->retries,
3313 &port->counter_group[CM_XMIT_RETRIES].
3314 counter[attr_index]);
3316 switch (mad_send_wc->status) {
3317 case IB_WC_SUCCESS:
3318 case IB_WC_WR_FLUSH_ERR:
3319 cm_free_msg(msg);
3320 break;
3321 default:
3322 if (msg->context[0] && msg->context[1])
3323 cm_process_send_error(msg, mad_send_wc->status);
3324 else
3325 cm_free_msg(msg);
3326 break;
3327 }
3328 }
3330 static void cm_work_handler(struct work_struct *_work)
3331 {
3332 struct cm_work *work = container_of(_work, struct cm_work, work.work);
3333 int ret;
3335 switch (work->cm_event.event) {
3336 case IB_CM_REQ_RECEIVED:
3337 ret = cm_req_handler(work);
3338 break;
3339 case IB_CM_MRA_RECEIVED:
3340 ret = cm_mra_handler(work);
3341 break;
3342 case IB_CM_REJ_RECEIVED:
3343 ret = cm_rej_handler(work);
3344 break;
3345 case IB_CM_REP_RECEIVED:
3346 ret = cm_rep_handler(work);
3347 break;
3348 case IB_CM_RTU_RECEIVED:
3349 ret = cm_rtu_handler(work);
3350 break;
3351 case IB_CM_USER_ESTABLISHED:
3352 ret = cm_establish_handler(work);
3353 break;
3354 case IB_CM_DREQ_RECEIVED:
3355 ret = cm_dreq_handler(work);
3356 break;
3357 case IB_CM_DREP_RECEIVED:
3358 ret = cm_drep_handler(work);
3359 break;
3360 case IB_CM_SIDR_REQ_RECEIVED:
3361 ret = cm_sidr_req_handler(work);
3362 break;
3363 case IB_CM_SIDR_REP_RECEIVED:
3364 ret = cm_sidr_rep_handler(work);
3365 break;
3366 case IB_CM_LAP_RECEIVED:
3367 ret = cm_lap_handler(work);
3368 break;
3369 case IB_CM_APR_RECEIVED:
3370 ret = cm_apr_handler(work);
3371 break;
3372 case IB_CM_TIMEWAIT_EXIT:
3373 ret = cm_timewait_handler(work);
3374 break;
3375 default:
3376 ret = -EINVAL;
3377 break;
3378 }
3379 if (ret)
3380 cm_free_work(work);
3381 }
3383 static int cm_establish(struct ib_cm_id *cm_id)
3384 {
3385 struct cm_id_private *cm_id_priv;
3386 struct cm_work *work;
3387 unsigned long flags;
3388 int ret = 0;
3389 struct cm_device *cm_dev;
3391 cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3392 if (!cm_dev)
3393 return -ENODEV;
3395 work = kmalloc(sizeof *work, GFP_ATOMIC);
3396 if (!work)
3397 return -ENOMEM;
3399 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3400 spin_lock_irqsave(&cm_id_priv->lock, flags);
3401 switch (cm_id->state)
3402 {
3403 case IB_CM_REP_SENT:
3404 case IB_CM_MRA_REP_RCVD:
3405 cm_id->state = IB_CM_ESTABLISHED;
3406 break;
3407 case IB_CM_ESTABLISHED:
3408 ret = -EISCONN;
3409 break;
3410 default:
3411 ret = -EINVAL;
3412 break;
3413 }
3414 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3416 if (ret) {
3417 kfree(work);
3418 goto out;
3419 }
3421 /*
3422 * The CM worker thread may try to destroy the cm_id before it
3423 * can execute this work item. To prevent potential deadlock,
3424 * we need to find the cm_id once we're in the context of the
3425 * worker thread, rather than holding a reference on it.
3426 */
3427 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3428 work->local_id = cm_id->local_id;
3429 work->remote_id = cm_id->remote_id;
3430 work->mad_recv_wc = NULL;
3431 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3433 /* Check if the device started its remove_one */
3434 spin_lock_irq(&cm.lock);
3435 if (!cm_dev->going_down) {
3436 queue_delayed_work(cm.wq, &work->work, 0);
3437 } else {
3438 kfree(work);
3439 ret = -ENODEV;
3440 }
3441 spin_unlock_irq(&cm.lock);
3443 out:
3444 return ret;
3445 }
3447 static int cm_migrate(struct ib_cm_id *cm_id)
3448 {
3449 struct cm_id_private *cm_id_priv;
3450 unsigned long flags;
3451 int ret = 0;
3453 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3454 spin_lock_irqsave(&cm_id_priv->lock, flags);
3455 if (cm_id->state == IB_CM_ESTABLISHED &&
3456 (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3457 cm_id->lap_state == IB_CM_LAP_IDLE)) {
3458 cm_id->lap_state = IB_CM_LAP_IDLE;
3459 cm_id_priv->av = cm_id_priv->alt_av;
3460 } else
3461 ret = -EINVAL;
3462 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3464 return ret;
3465 }
3467 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3468 {
3469 int ret;
3471 switch (event) {
3472 case IB_EVENT_COMM_EST:
3473 ret = cm_establish(cm_id);
3474 break;
3475 case IB_EVENT_PATH_MIG:
3476 ret = cm_migrate(cm_id);
3477 break;
3478 default:
3479 ret = -EINVAL;
3480 }
3481 return ret;
3482 }
3483 EXPORT_SYMBOL(ib_cm_notify);
3485 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3486 struct ib_mad_recv_wc *mad_recv_wc)
3487 {
3488 struct cm_port *port = mad_agent->context;
3489 struct cm_work *work;
3490 enum ib_cm_event_type event;
3491 u16 attr_id;
3492 int paths = 0;
3493 int going_down = 0;
3495 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
3496 case CM_REQ_ATTR_ID:
3497 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
3498 alt_local_lid != 0);
3499 event = IB_CM_REQ_RECEIVED;
3500 break;
3501 case CM_MRA_ATTR_ID:
3502 event = IB_CM_MRA_RECEIVED;
3503 break;
3504 case CM_REJ_ATTR_ID:
3505 event = IB_CM_REJ_RECEIVED;
3506 break;
3507 case CM_REP_ATTR_ID:
3508 event = IB_CM_REP_RECEIVED;
3509 break;
3510 case CM_RTU_ATTR_ID:
3511 event = IB_CM_RTU_RECEIVED;
3512 break;
3513 case CM_DREQ_ATTR_ID:
3514 event = IB_CM_DREQ_RECEIVED;
3515 break;
3516 case CM_DREP_ATTR_ID:
3517 event = IB_CM_DREP_RECEIVED;
3518 break;
3519 case CM_SIDR_REQ_ATTR_ID:
3520 event = IB_CM_SIDR_REQ_RECEIVED;
3521 break;
3522 case CM_SIDR_REP_ATTR_ID:
3523 event = IB_CM_SIDR_REP_RECEIVED;
3524 break;
3525 case CM_LAP_ATTR_ID:
3526 paths = 1;
3527 event = IB_CM_LAP_RECEIVED;
3528 break;
3529 case CM_APR_ATTR_ID:
3530 event = IB_CM_APR_RECEIVED;
3531 break;
3532 default:
3533 ib_free_recv_mad(mad_recv_wc);
3534 return;
3535 }
3537 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
3538 atomic_long_inc(&port->counter_group[CM_RECV].
3539 counter[attr_id - CM_ATTR_ID_OFFSET]);
3541 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
3542 GFP_KERNEL);
3543 if (!work) {
3544 ib_free_recv_mad(mad_recv_wc);
3545 return;
3546 }
3548 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3549 work->cm_event.event = event;
3550 work->mad_recv_wc = mad_recv_wc;
3551 work->port = port;
3553 /* Check if the device started its remove_one */
3554 spin_lock_irq(&cm.lock);
3555 if (!port->cm_dev->going_down)
3556 queue_delayed_work(cm.wq, &work->work, 0);
3557 else
3558 going_down = 1;
3559 spin_unlock_irq(&cm.lock);
3561 if (going_down) {
3562 kfree(work);
3563 ib_free_recv_mad(mad_recv_wc);
3564 }
3565 }
3567 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3568 struct ib_qp_attr *qp_attr,
3569 int *qp_attr_mask)
3570 {
3571 unsigned long flags;
3572 int ret;
3574 spin_lock_irqsave(&cm_id_priv->lock, flags);
3575 switch (cm_id_priv->id.state) {
3576 case IB_CM_REQ_SENT:
3577 case IB_CM_MRA_REQ_RCVD:
3578 case IB_CM_REQ_RCVD:
3579 case IB_CM_MRA_REQ_SENT:
3580 case IB_CM_REP_RCVD:
3581 case IB_CM_MRA_REP_SENT:
3582 case IB_CM_REP_SENT:
3583 case IB_CM_MRA_REP_RCVD:
3584 case IB_CM_ESTABLISHED:
3585 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3586 IB_QP_PKEY_INDEX | IB_QP_PORT;
3587 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
3588 if (cm_id_priv->responder_resources)
3589 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
3590 IB_ACCESS_REMOTE_ATOMIC;
3591 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3592 qp_attr->port_num = cm_id_priv->av.port->port_num;
3593 ret = 0;
3594 break;
3595 default:
3596 ret = -EINVAL;
3597 break;
3598 }
3599 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3600 return ret;
3601 }
3603 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3604 struct ib_qp_attr *qp_attr,
3605 int *qp_attr_mask)
3606 {
3607 unsigned long flags;
3608 int ret;
3610 spin_lock_irqsave(&cm_id_priv->lock, flags);
3611 switch (cm_id_priv->id.state) {
3612 case IB_CM_REQ_RCVD:
3613 case IB_CM_MRA_REQ_SENT:
3614 case IB_CM_REP_RCVD:
3615 case IB_CM_MRA_REP_SENT:
3616 case IB_CM_REP_SENT:
3617 case IB_CM_MRA_REP_RCVD:
3618 case IB_CM_ESTABLISHED:
3619 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3620 IB_QP_DEST_QPN | IB_QP_RQ_PSN;
3621 qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3622 if (!cm_id_priv->av.valid) {
3623 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3624 return -EINVAL;
3625 }
3626 if (cm_id_priv->av.ah_attr.vlan_id != 0xffff) {
3627 qp_attr->vlan_id = cm_id_priv->av.ah_attr.vlan_id;
3628 *qp_attr_mask |= IB_QP_VID;
3629 }
3630 if (!is_zero_ether_addr(cm_id_priv->av.smac)) {
3631 memcpy(qp_attr->smac, cm_id_priv->av.smac,
3632 sizeof(qp_attr->smac));
3633 *qp_attr_mask |= IB_QP_SMAC;
3634 }
3635 if (cm_id_priv->alt_av.valid) {
3636 if (cm_id_priv->alt_av.ah_attr.vlan_id != 0xffff) {
3637 qp_attr->alt_vlan_id =
3638 cm_id_priv->alt_av.ah_attr.vlan_id;
3639 *qp_attr_mask |= IB_QP_ALT_VID;
3640 }
3641 if (!is_zero_ether_addr(cm_id_priv->alt_av.smac)) {
3642 memcpy(qp_attr->alt_smac,
3643 cm_id_priv->alt_av.smac,
3644 sizeof(qp_attr->alt_smac));
3645 *qp_attr_mask |= IB_QP_ALT_SMAC;
3646 }
3647 }
3648 qp_attr->path_mtu = cm_id_priv->path_mtu;
3649 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3650 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3651 if (cm_id_priv->qp_type == IB_QPT_RC ||
3652 cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
3653 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
3654 IB_QP_MIN_RNR_TIMER;
3655 qp_attr->max_dest_rd_atomic =
3656 cm_id_priv->responder_resources;
3657 qp_attr->min_rnr_timer = 0;
3658 }
3659 if (cm_id_priv->alt_av.ah_attr.dlid) {
3660 *qp_attr_mask |= IB_QP_ALT_PATH;
3661 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3662 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3663 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
3664 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3665 }
3666 ret = 0;
3667 break;
3668 default:
3669 ret = -EINVAL;
3670 break;
3671 }
3672 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3673 return ret;
3674 }
3676 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3677 struct ib_qp_attr *qp_attr,
3678 int *qp_attr_mask)
3679 {
3680 unsigned long flags;
3681 int ret;
3683 spin_lock_irqsave(&cm_id_priv->lock, flags);
3684 switch (cm_id_priv->id.state) {
3685 /* Allow transition to RTS before sending REP */
3686 case IB_CM_REQ_RCVD:
3687 case IB_CM_MRA_REQ_SENT:
3689 case IB_CM_REP_RCVD:
3690 case IB_CM_MRA_REP_SENT:
3691 case IB_CM_REP_SENT:
3692 case IB_CM_MRA_REP_RCVD:
3693 case IB_CM_ESTABLISHED:
3694 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
3695 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
3696 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3697 switch (cm_id_priv->qp_type) {
3698 case IB_QPT_RC:
3699 case IB_QPT_XRC_INI:
3700 *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
3701 IB_QP_MAX_QP_RD_ATOMIC;
3702 qp_attr->retry_cnt = cm_id_priv->retry_count;
3703 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3704 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
3705 /* fall through */
3706 case IB_QPT_XRC_TGT:
3707 *qp_attr_mask |= IB_QP_TIMEOUT;
3708 qp_attr->timeout = cm_id_priv->av.timeout;
3709 break;
3710 default:
3711 break;
3712 }
3713 if (cm_id_priv->alt_av.ah_attr.dlid) {
3714 *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3715 qp_attr->path_mig_state = IB_MIG_REARM;
3716 }
3717 } else {
3718 *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
3719 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3720 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3721 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
3722 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3723 qp_attr->path_mig_state = IB_MIG_REARM;
3724 }
3725 ret = 0;
3726 break;
3727 default:
3728 ret = -EINVAL;
3729 break;
3730 }
3731 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3732 return ret;
3733 }
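/*
 * Fill in the QP attributes a consumer should pass to ib_modify_qp() to
 * bring its QP through INIT, RTR, and RTS for this connection.
 */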
3735 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3736 struct ib_qp_attr *qp_attr,
3737 int *qp_attr_mask)
3738 {
3739 struct cm_id_private *cm_id_priv;
3740 int ret;
3742 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3743 switch (qp_attr->qp_state) {
3744 case IB_QPS_INIT:
3745 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
3746 break;
3747 case IB_QPS_RTR:
3748 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
3749 break;
3750 case IB_QPS_RTS:
3751 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
3752 break;
3753 default:
3754 ret = -EINVAL;
3755 break;
3756 }
3757 return ret;
3758 }
3759 EXPORT_SYMBOL(ib_cm_init_qp_attr);
3761 static void cm_get_ack_delay(struct cm_device *cm_dev)
3762 {
3763 struct ib_device_attr attr;
3765 if (ib_query_device(cm_dev->ib_device, &attr))
3766 cm_dev->ack_delay = 0; /* acks will rely on packet life time */
3767 else
3768 cm_dev->ack_delay = attr.local_ca_ack_delay;
3769 }
3771 static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
3772 char *buf)
3773 {
3774 struct cm_counter_group *group;
3775 struct cm_counter_attribute *cm_attr;
3777 group = container_of(obj, struct cm_counter_group, obj);
3778 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
3780 return sprintf(buf, "%ld\n",
3781 atomic_long_read(&group->counter[cm_attr->index]));
3782 }
3784 static const struct sysfs_ops cm_counter_ops = {
3785 .show = cm_show_counter
3786 };
3788 static struct kobj_type cm_counter_obj_type = {
3789 .sysfs_ops = &cm_counter_ops,
3790 .default_attrs = cm_counter_default_attrs
3791 };
3793 static void cm_release_port_obj(struct kobject *obj)
3794 {
3795 struct cm_port *cm_port;
3797 cm_port = container_of(obj, struct cm_port, port_obj);
3798 kfree(cm_port);
3799 }
3801 static struct kobj_type cm_port_obj_type = {
3802 .release = cm_release_port_obj
3803 };
3805 static char *cm_devnode(struct device *dev, umode_t *mode)
3806 {
3807 if (mode)
3808 *mode = 0666;
3809 return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
3810 }
3812 struct class cm_class = {
3813 .owner = THIS_MODULE,
3814 .name = "infiniband_cm",
3815 .devnode = cm_devnode,
3816 };
3817 EXPORT_SYMBOL(cm_class);
3819 static int cm_create_port_fs(struct cm_port *port)
3820 {
3821 int i, ret;
3823 ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
3824 &port->cm_dev->device->kobj,
3825 "%d", port->port_num);
3826 if (ret) {
3827 kfree(port);
3828 return ret;
3829 }
3831 for (i = 0; i < CM_COUNTER_GROUPS; i++) {
3832 ret = kobject_init_and_add(&port->counter_group[i].obj,
3833 &cm_counter_obj_type,
3834 &port->port_obj,
3835 "%s", counter_group_names[i]);
3836 if (ret)
3837 goto error;
3838 }
3840 return 0;
3842 error:
3843 while (i--)
3844 kobject_put(&port->counter_group[i].obj);
3845 kobject_put(&port->port_obj);
3846 return ret;
3847 }
3850 static void cm_remove_port_fs(struct cm_port *port)
3851 {
3852 int i;
3854 for (i = 0; i < CM_COUNTER_GROUPS; i++)
3855 kobject_put(&port->counter_group[i].obj);
3857 kobject_put(&port->port_obj);
3858 }
3860 static void cm_add_one(struct ib_device *ib_device)
3861 {
3862 struct cm_device *cm_dev;
3863 struct cm_port *port;
3864 struct ib_mad_reg_req reg_req = {
3865 .mgmt_class = IB_MGMT_CLASS_CM,
3866 .mgmt_class_version = IB_CM_CLASS_VERSION,
3867 };
3868 struct ib_port_modify port_modify = {
3869 .set_port_cap_mask = IB_PORT_CM_SUP
3870 };
3871 unsigned long flags;
3872 int ret;
3873 int count = 0;
3874 u8 i;
3876 cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
3877 ib_device->phys_port_cnt, GFP_KERNEL);
3878 if (!cm_dev)
3879 return;
3881 cm_dev->ib_device = ib_device;
3882 cm_get_ack_delay(cm_dev);
3883 cm_dev->going_down = 0;
3884 cm_dev->device = device_create(&cm_class, &ib_device->dev,
3885 MKDEV(0, 0), NULL,
3886 "%s", ib_device->name);
3887 if (IS_ERR(cm_dev->device)) {
3888 kfree(cm_dev);
3889 return;
3890 }
3892 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
3893 for (i = 1; i <= ib_device->phys_port_cnt; i++) {
3894 if (!rdma_cap_ib_cm(ib_device, i))
3895 continue;
3897 port = kzalloc(sizeof *port, GFP_KERNEL);
3898 if (!port)
3899 goto error1;
3901 cm_dev->port[i-1] = port;
3902 port->cm_dev = cm_dev;
3903 port->port_num = i;
3905 ret = cm_create_port_fs(port);
3906 if (ret)
3907 goto error1;
3909 port->mad_agent = ib_register_mad_agent(ib_device, i,
3910 IB_QPT_GSI,
3911 &reg_req,
3912 0,
3913 cm_send_handler,
3914 cm_recv_handler,
3915 port,
3916 0);
3917 if (IS_ERR(port->mad_agent))
3918 goto error2;
3920 ret = ib_modify_port(ib_device, i, 0, &port_modify);
3921 if (ret)
3922 goto error3;
3924 count++;
3925 }
3927 if (!count)
3928 goto free;
3930 ib_set_client_data(ib_device, &cm_client, cm_dev);
3932 write_lock_irqsave(&cm.device_lock, flags);
3933 list_add_tail(&cm_dev->list, &cm.device_list);
3934 write_unlock_irqrestore(&cm.device_lock, flags);
3935 return;
3937 error3:
3938 ib_unregister_mad_agent(port->mad_agent);
3939 error2:
3940 cm_remove_port_fs(port);
3941 error1:
3942 port_modify.set_port_cap_mask = 0;
3943 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
3944 while (--i) {
3945 if (!rdma_cap_ib_cm(ib_device, i))
3946 continue;
3948 port = cm_dev->port[i-1];
3949 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
3950 ib_unregister_mad_agent(port->mad_agent);
3951 cm_remove_port_fs(port);
3952 }
3953 free:
3954 device_unregister(cm_dev->device);
3955 kfree(cm_dev);
3956 }
3958 static void cm_remove_one(struct ib_device *ib_device, void *client_data)
3959 {
3960 struct cm_device *cm_dev = client_data;
3961 struct cm_port *port;
3962 struct ib_port_modify port_modify = {
3963 .clr_port_cap_mask = IB_PORT_CM_SUP
3964 };
3965 unsigned long flags;
3966 int i;
3968 if (!cm_dev)
3969 return;
3971 write_lock_irqsave(&cm.device_lock, flags);
3972 list_del(&cm_dev->list);
3973 write_unlock_irqrestore(&cm.device_lock, flags);
3975 spin_lock_irq(&cm.lock);
3976 cm_dev->going_down = 1;
3977 spin_unlock_irq(&cm.lock);
3979 for (i = 1; i <= ib_device->phys_port_cnt; i++) {
3980 if (!rdma_cap_ib_cm(ib_device, i))
3981 continue;
3983 port = cm_dev->port[i-1];
3984 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
3985 /*
3986 * Flush the work queue after setting going_down. This
3987 * guarantees the recv handler queues no new work, so it
3988 * is then safe to unregister the MAD agent.
3989 */
3990 flush_workqueue(cm.wq);
3991 ib_unregister_mad_agent(port->mad_agent);
3992 cm_remove_port_fs(port);
3993 }
3995 device_unregister(cm_dev->device);
3996 kfree(cm_dev);
3997 }
3998 static int __init ib_cm_init(void)
3999 {
4000 int ret;
4002 memset(&cm, 0, sizeof cm);
4003 INIT_LIST_HEAD(&cm.device_list);
4004 rwlock_init(&cm.device_lock);
4005 spin_lock_init(&cm.lock);
4006 cm.listen_service_table = RB_ROOT;
4007 cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
4008 cm.remote_id_table = RB_ROOT;
4009 cm.remote_qp_table = RB_ROOT;
4010 cm.remote_sidr_table = RB_ROOT;
4011 idr_init(&cm.local_id_table);
4012 get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
4013 INIT_LIST_HEAD(&cm.timewait_list);
4015 ret = class_register(&cm_class);
4016 if (ret) {
4017 ret = -ENOMEM;
4018 goto error1;
4019 }
4021 cm.wq = create_workqueue("ib_cm");
4022 if (!cm.wq) {
4023 ret = -ENOMEM;
4024 goto error2;
4025 }
4027 ret = ib_register_client(&cm_client);
4028 if (ret)
4029 goto error3;
4031 return 0;
4032 error3:
4033 destroy_workqueue(cm.wq);
4034 error2:
4035 class_unregister(&cm_class);
4036 error1:
4037 idr_destroy(&cm.local_id_table);
4038 return ret;
4039 }
4041 static void __exit ib_cm_cleanup(void)
4042 {
4043 struct cm_timewait_info *timewait_info, *tmp;
4045 spin_lock_irq(&cm.lock);
4046 list_for_each_entry(timewait_info, &cm.timewait_list, list)
4047 cancel_delayed_work(&timewait_info->work.work);
4048 spin_unlock_irq(&cm.lock);
4050 ib_unregister_client(&cm_client);
4051 destroy_workqueue(cm.wq);
4053 list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
4054 list_del(&timewait_info->list);
4055 kfree(timewait_info);
4056 }
4058 class_unregister(&cm_class);
4059 idr_destroy(&cm.local_id_table);
4060 }
4062 module_init(ib_cm_init);
4063 module_exit(ib_cm_cleanup);