/*
 * Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cm_client = {
	.remove = cm_remove_one
};

	struct list_head device_list;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;

/* Counter indexes ordered by attribute ID */

	CM_ATTR_ID_OFFSET = 0x0010,

static char const counter_group_names[CM_COUNTER_GROUPS]
				     [sizeof("cm_rx_duplicates")] = {
	"cm_tx_msgs", "cm_tx_retries",
	"cm_rx_msgs", "cm_rx_duplicates"
};

struct cm_counter_group {
	atomic_long_t counter[CM_ATTR_COUNT];

struct cm_counter_attribute {
	struct attribute attr;

#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \

static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);

static struct attribute *cm_counter_default_attrs[] = {
	&cm_req_counter_attr.attr,
	&cm_mra_counter_attr.attr,
	&cm_rej_counter_attr.attr,
	&cm_rep_counter_attr.attr,
	&cm_rtu_counter_attr.attr,
	&cm_dreq_counter_attr.attr,
	&cm_drep_counter_attr.attr,
	&cm_sidr_req_counter_attr.attr,
	&cm_sidr_rep_counter_attr.attr,
	&cm_lap_counter_attr.attr,
	&cm_apr_counter_attr.attr,

	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	struct kobject port_obj;
	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];

	struct list_head list;
	struct ib_device *ib_device;
	struct device *device;
	struct cm_port *port[0];

	struct cm_port *port;
	struct ib_ah_attr ah_attr;

	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];

struct cm_timewait_info {
	struct cm_work work;	/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;

struct cm_id_private {
	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;

	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
	 * Protected by the cm.lock spinlock. */
	int listen_sharecount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	enum ib_qp_type qp_type;
	enum ib_mtu path_mtu;
	u8 responder_resources;
	struct list_head work_list;

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       IB_MGMT_BASE_VERSION);

	/* Timeout set by caller if response is expected. */
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
	struct ib_mad_send_buf *m;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       IB_MGMT_BASE_VERSION);

static void cm_free_msg(struct ib_mad_send_buf *msg)
	ib_destroy_ah(msg->ah);
	cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);

static void * cm_copy_private_data(const void *private_data,
	if (!private_data || !private_data_len)

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
			port = cm_dev->port[p-1];
	read_unlock_irqrestore(&cm.device_lock, flags);

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);

	ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
	av->timeout = path->packet_life_time + 1;
	memcpy(av->smac, path->smac, sizeof(av->smac));

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&cm.lock, flags);

	id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
	return id < 0 ? id : 0;

static void cm_free_id(__be32 local_id)
	spin_lock_irq(&cm.lock);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irq(&cm.lock);
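
/*
 * Illustrative note (not from the original source): the ID handed out on
 * the wire is the idr index XOR'ed with the boot-time random operand,
 * which keeps local IDs unpredictable to remote nodes. XOR is its own
 * inverse, so cm_free_id() above and cm_get_id() below recover the idr
 * index the same way, e.g. index 5 ^ 0x12345678 = 0x1234567d on
 * allocation, and 0x1234567d ^ 0x12345678 = 5 on lookup.
 */
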
static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
	struct cm_id_private *cm_id_priv;

	spin_lock_irq(&cm.lock);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irq(&cm.lock);

/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
	return (__force u32) a < (__force u32) b;

static int be32_gt(__be32 a, __be32 b)
	return (__force u32) a > (__force u32) b;

static int be64_lt(__be64 a, __be64 b)
	return (__force u64) a < (__force u64) b;

static int be64_gt(__be64 a, __be64 b)
	return (__force u64) a > (__force u64) b;

static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;

		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else
			link = &(*link)->rb_right;
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);

static struct cm_id_private * cm_find_listen(struct ib_device *device,
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device))

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else
			node = node->rb_right;

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

		timewait_info = rb_entry(node, struct cm_timewait_info,
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else
			return timewait_info;

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				link = &(*link)->rb_left;
				link = &(*link)->rb_right;
				return cur_cm_id_priv;
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
	struct cm_id_private *cm_id_priv;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

	return ERR_PTR(-ENOMEM);

EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);

static void cm_free_work(struct cm_work *work)
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);

static inline int cm_convert_to_ms(int iba_time)
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
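
/*
 * Illustrative worked example (not from the original source): iba_time is
 * an exponent, so each increment doubles the timeout. For iba_time = 20
 * the exact value is 4.096us * 2^20 ~= 4295 ms, while the approximation
 * above gives 1 << (20 - 8) = 4096 ms. For iba_time <= 8 the result
 * clamps to 1 ms.
 */
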
/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
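
/*
 * Illustrative worked example (not from the original source): with
 * ca_ack_delay = 15 and packet_life_time = 14, ack_timeout starts at 15;
 * ca_ack_delay is within one unit of it, so the result rounds up to 16,
 * i.e. the stored timeout doubles when both contributions are comparable.
 * With ca_ack_delay = 15 and packet_life_time = 20 the result stays 21:
 * the round trip dominates and the CA's ack delay is negligible.
 */
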
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

	/* Check if the device started its remove_one */
	spin_lock_irq(&cm.lock);
	if (!cm_dev->going_down)
		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
				   msecs_to_jiffies(wait_time));
	spin_unlock_irq(&cm.lock);

	cm_id_priv->timewait_info = NULL;

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id->state) {
		spin_unlock_irq(&cm_id_priv->lock);

		spin_lock_irq(&cm.lock);
		if (--cm_id_priv->listen_sharecount > 0) {
			/* The id is still shared. */
			cm_deref_id(cm_id_priv);
			spin_unlock_irq(&cm.lock);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irq(&cm.lock);
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
	case IB_CM_MRA_REQ_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->id.device->node_guid,
			       sizeof cm_id_priv->id.device->node_guid,
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irq(&cm_id_priv->lock);
		} else {
			spin_unlock_irq(&cm_id_priv->lock);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
	case IB_CM_ESTABLISHED:
		spin_unlock_irq(&cm_id_priv->lock);
		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)

		ib_send_cm_dreq(cm_id, NULL, 0);
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
	case IB_CM_DREQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_drep(cm_id, NULL, 0);

		spin_unlock_irq(&cm_id_priv->lock);

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)

	kfree(cm_id_priv->private_data);

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
	cm_destroy_id(cm_id, 0);
EXPORT_SYMBOL(ib_destroy_cm_id);

/**
 * __ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests. The service ID should be specified
 *   in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 * @service_mask: Mask applied to service ID used to listen across a
 *   range of service IDs. If set to 0, the service ID is matched
 *   exactly. This parameter is ignored if %service_id is set to
 *   IB_CM_ASSIGN_SERVICE_ID.
 */
static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;

	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)

	cm_id->state = IB_CM_LISTEN;
	++cm_id_priv->listen_sharecount;

	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = ~cpu_to_be64(0);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		--cm_id_priv->listen_sharecount;

int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
	spin_lock_irqsave(&cm.lock, flags);
	ret = __ib_cm_listen(cm_id, service_id, service_mask);
	spin_unlock_irqrestore(&cm.lock, flags);

EXPORT_SYMBOL(ib_cm_listen);
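
/*
 * Illustrative sketch (not part of the original file) of the calls above
 * from a hypothetical kernel ULP. A service_mask of 0 requests an exact
 * match on service_id, per the normalization at the top of
 * __ib_cm_listen(); the handler and service ID value are placeholders.
 *
 *	static int my_listen_handler(struct ib_cm_id *id,
 *				     struct ib_cm_event *event)
 *	{
 *		return 0;	// nonzero would ask the CM to destroy the id
 *	}
 *
 *	id = ib_create_cm_id(device, my_listen_handler, NULL);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	ret = ib_cm_listen(id, cpu_to_be64(0x1000), 0);
 *	if (ret)
 *		ib_destroy_cm_id(id);
 */
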
/**
 * Create a new listening ib_cm_id and listen on the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id. All related communication will
 *   be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests. The service ID should be specified
 *   in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
				     ib_cm_handler cm_handler,
	struct cm_id_private *cm_id_priv;
	struct ib_cm_id *cm_id;

	/* Create an ID in advance, since the creation may sleep */
	cm_id = ib_create_cm_id(device, cm_handler, NULL);

	spin_lock_irqsave(&cm.lock, flags);

	if (service_id == IB_CM_ASSIGN_SERVICE_ID)

	/* Find an existing ID */
	cm_id_priv = cm_find_listen(device, service_id);
		if (cm_id->cm_handler != cm_handler || cm_id->context) {
			/* Sharing an ib_cm_id with different handlers is not
			 * supported */
			spin_unlock_irqrestore(&cm.lock, flags);
			return ERR_PTR(-EINVAL);
		atomic_inc(&cm_id_priv->refcount);
		++cm_id_priv->listen_sharecount;
		spin_unlock_irqrestore(&cm.lock, flags);

		ib_destroy_cm_id(cm_id);
		cm_id = &cm_id_priv->id;

	/* Use newly created ID */
	err = __ib_cm_listen(cm_id, service_id, 0);

	spin_unlock_irqrestore(&cm.lock, flags);
		ib_destroy_cm_id(cm_id);
		return ERR_PTR(err);

EXPORT_SYMBOL(ib_cm_insert_listen);
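
/*
 * Illustrative note (not part of the original file): unlike ib_cm_listen(),
 * ib_cm_insert_listen() may hand back an existing shared listener, so each
 * successful caller owns one reference and must drop it with
 * ib_destroy_cm_id(), which decrements listen_sharecount and only unlinks
 * the listen when the last sharer is gone (see cm_destroy_id() above).
 * Per the check in the function, sharing requires a matching handler;
 * otherwise ERR_PTR(-EINVAL) is returned. A hypothetical use:
 *
 *	struct ib_cm_id *id;
 *
 *	id = ib_cm_insert_listen(device, my_listen_handler, svc_id);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	...
 *	ib_destroy_cm_id(id);
 */
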
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
	u64 hi_tid, low_tid;

	hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
	return cpu_to_be64(hi_tid | low_tid);

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
	hdr->base_version = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method = IB_MGMT_METHOD_SEND;
	hdr->attr_id = attr_id;

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
	struct ib_sa_path_rec *pri_path = param->primary_path;
	struct ib_sa_path_rec *alt_path = param->alternate_path;

	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);

	if (param->qp_type != IB_QPT_XRC_INI) {
		cm_req_set_resp_res(req_msg, param->responder_resources);
		cm_req_set_retry_count(req_msg, param->retry_count);
		cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
		cm_req_set_srq(req_msg, param->srq);
	}

	if (pri_path->hop_limit <= 1) {
		req_msg->primary_local_lid = pri_path->slid;
		req_msg->primary_remote_lid = pri_path->dlid;
	} else {
		/* Work-around until there's a way to obtain remote LID info */
		req_msg->primary_local_lid = IB_LID_PERMISSIVE;
		req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
	}
	req_msg->primary_local_gid = pri_path->sgid;
	req_msg->primary_remote_gid = pri_path->dgid;
	cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
	req_msg->primary_traffic_class = pri_path->traffic_class;
	req_msg->primary_hop_limit = pri_path->hop_limit;
	cm_req_set_primary_sl(req_msg, pri_path->sl);
	cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
	cm_req_set_primary_local_ack_timeout(req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	if (alt_path->hop_limit <= 1) {
		req_msg->alt_local_lid = alt_path->slid;
		req_msg->alt_remote_lid = alt_path->dlid;
	} else {
		req_msg->alt_local_lid = IB_LID_PERMISSIVE;
		req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
	}
	req_msg->alt_local_gid = alt_path->sgid;
	req_msg->alt_remote_gid = alt_path->dgid;
	cm_req_set_alt_flow_label(req_msg,
				  alt_path->flow_label);
	cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
	req_msg->alt_traffic_class = alt_path->traffic_class;
	req_msg->alt_hop_limit = alt_path->hop_limit;
	cm_req_set_alt_sl(req_msg, alt_path->sl);
	cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
	cm_req_set_alt_local_ack_timeout(req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       alt_path->packet_life_time));

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);

static int cm_validate_req_param(struct ib_cm_req_param *param)
	/* peer-to-peer not supported */
	if (param->peer_to_peer)

	if (!param->primary_path)

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
	    param->qp_type != IB_QPT_XRC_INI)

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))

int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;

	ret = cm_validate_req_param(param);

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);

	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				param->primary_path->packet_life_time) * 2 +
				param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);

EXPORT_SYMBOL(ib_send_cm_req);
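
/*
 * Illustrative sketch (not part of the original file): a minimal,
 * hypothetical ib_cm_req_param setup for an RC connection. The field
 * names come from cm_validate_req_param()/cm_format_req() above; the
 * values are placeholders.
 *
 *	struct ib_cm_req_param req = {
 *		.primary_path		= &path_rec,	// e.g. from an SA query
 *		.service_id		= svc_id,
 *		.qp_num			= qp->qp_num,
 *		.qp_type		= IB_QPT_RC,
 *		.starting_psn		= my_psn,
 *		.responder_resources	= 4,
 *		.initiator_depth	= 4,
 *		.remote_cm_response_timeout = 20,
 *		.local_cm_response_timeout = 20,
 *		.retry_count		= 7,
 *		.rnr_retry_count	= 7,
 *		.max_cm_retries		= 15,
 *	};
 *
 *	ret = ib_send_cm_req(id, &req);
 *
 * The MAD layer retries the REQ up to max_cm_retries times (set on the
 * send buffer in cm_alloc_msg()), using the timeout_ms derived above
 * from the path's packet_life_time and the remote CM response timeout.
 */
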
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);

	/* We just need common CM header information. Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);

	ret = ib_post_send_mad(msg, NULL);

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
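
/*
 * Illustrative worked example (not from the original source):
 * cm_is_active_peer() breaks ties when both sides initiate. The
 * numerically larger CA GUID is the "active" peer; with equal GUIDs
 * (both QPs on the same CA), the larger QPN wins. E.g. local GUID 0x2,
 * remote GUID 0x2, local QPN 0x1a5, remote QPN 0x0c3: the local side
 * is active.
 */
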
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
	primary_path->service_id = req_msg->service_id;

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
		alt_path->service_id = req_msg->service_id;

static u16 cm_get_bth_pkey(struct cm_work *work)
	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;
	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;

	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
				     port_num, pkey_index, ret);

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
		cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
		cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irq(&cm_id_priv->lock);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,

	cm_deref_id(cm_id_priv);
		cm_destroy_id(&cm_id_priv->id, ret);

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  const void *private_data,
			  u8 private_data_len)
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);

		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
	struct ib_mad_send_buf *msg = NULL;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REQ_COUNTER]);

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);

	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);

static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for possible duplicate REQ. */
	spin_lock_irq(&cm.lock);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irq(&cm.lock);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);

	/* Check for stale connections. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,

	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irq(&cm.lock);

	return listen_cm_id_priv;

/*
 * Work-around for inter-subnet connections. If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
	if (!cm_req_get_primary_subnet_local(req_msg)) {
		if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
			req_msg->primary_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_primary_sl(req_msg, wc->sl);

		if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
			req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);

	if (!cm_req_get_alt_subnet_local(req_msg)) {
		if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
			req_msg->alt_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_alt_sl(req_msg, wc->sl);

		if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
			req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);

static int cm_req_handler(struct cm_work *work)
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);

	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		kfree(cm_id_priv->timewait_info);

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);

	memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
	work->path[0].vlan_id = cm_id_priv->av.ah_attr.vlan_id;
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
		ib_get_cached_gid(work->port->cm_dev->ib_device,
				  work->port->port_num, 0, &work->path[0].sgid);
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,

	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);

	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				 cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);

	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
	ib_destroy_cm_id(cm_id);

static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	cm_rep_set_target_ack_delay(rep_msg,
				    cm_id_priv->av.port->cm_dev->ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
		rep_msg->initiator_depth = param->initiator_depth;
		cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
		cm_rep_set_srq(rep_msg, param->srq);
		cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	} else {
		cm_rep_set_srq(rep_msg, 1);
		cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
	}

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);

int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {

	ret = cm_alloc_msg(cm_id_priv, &msg);

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)

	data = cm_copy_private_data(private_data, private_data_len);
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {

	ret = cm_alloc_msg(cm_id_priv, &msg);

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

EXPORT_SYMBOL(ib_send_cm_rtu);
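
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * passive-side handler fragment showing the accept path. On
 * IB_CM_REQ_RECEIVED the ULP replies with a REP; the active side then
 * confirms with an RTU (ib_send_cm_rtu() above), and both ends move to
 * IB_CM_ESTABLISHED. Field names follow ib_cm_rep_param and
 * ib_cm_req_event_param; my_qp and my_psn are placeholders.
 *
 *	static int my_passive_handler(struct ib_cm_id *cm_id,
 *				      struct ib_cm_event *event)
 *	{
 *		struct ib_cm_rep_param rep = {};
 *
 *		switch (event->event) {
 *		case IB_CM_REQ_RECEIVED:
 *			rep.qp_num = my_qp->qp_num;
 *			rep.starting_psn = my_psn;
 *			rep.responder_resources =
 *				event->param.req_rcvd.responder_resources;
 *			rep.initiator_depth =
 *				event->param.req_rcvd.initiator_depth;
 *			rep.rnr_retry_count = 7;
 *			return ib_send_cm_rep(cm_id, &rep);
 *		default:
 *			return 0;
 *		}
 *	}
 */
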
1826 static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
1828 struct cm_rep_msg *rep_msg;
1829 struct ib_cm_rep_event_param *param;
1831 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1832 param = &work->cm_event.param.rep_rcvd;
1833 param->remote_ca_guid = rep_msg->local_ca_guid;
1834 param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
1835 param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
1836 param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
1837 param->responder_resources = rep_msg->initiator_depth;
1838 param->initiator_depth = rep_msg->resp_resources;
1839 param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
1840 param->failover_accepted = cm_rep_get_failover(rep_msg);
1841 param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
1842 param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1843 param->srq = cm_rep_get_srq(rep_msg);
1844 work->cm_event.private_data = &rep_msg->private_data;
1847 static void cm_dup_rep_handler(struct cm_work *work)
1849 struct cm_id_private *cm_id_priv;
1850 struct cm_rep_msg *rep_msg;
1851 struct ib_mad_send_buf *msg = NULL;
1854 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
1855 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
1856 rep_msg->local_comm_id);
1860 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
1861 counter[CM_REP_COUNTER]);
1862 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1866 spin_lock_irq(&cm_id_priv->lock);
1867 if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
1868 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1869 cm_id_priv->private_data,
1870 cm_id_priv->private_data_len);
1871 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
1872 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1873 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
1874 cm_id_priv->private_data,
1875 cm_id_priv->private_data_len);
1878 spin_unlock_irq(&cm_id_priv->lock);
1880 ret = ib_post_send_mad(msg, NULL);
1885 unlock: spin_unlock_irq(&cm_id_priv->lock);
1886 free: cm_free_msg(msg);
1887 deref: cm_deref_id(cm_id_priv);
1890 static int cm_rep_handler(struct cm_work *work)
1892 struct cm_id_private *cm_id_priv;
1893 struct cm_rep_msg *rep_msg;
1896 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1897 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
1899 cm_dup_rep_handler(work);
1903 cm_format_rep_event(work, cm_id_priv->qp_type);
1905 spin_lock_irq(&cm_id_priv->lock);
1906 switch (cm_id_priv->id.state) {
1907 case IB_CM_REQ_SENT:
1908 case IB_CM_MRA_REQ_RCVD:
1911 spin_unlock_irq(&cm_id_priv->lock);
1916 cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
1917 cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
1918 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
1920 spin_lock(&cm.lock);
1921 /* Check for duplicate REP. */
1922 if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
1923 spin_unlock(&cm.lock);
1924 spin_unlock_irq(&cm_id_priv->lock);
1928 /* Check for a stale connection. */
1929 if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
1930 rb_erase(&cm_id_priv->timewait_info->remote_id_node,
1931 &cm.remote_id_table);
1932 cm_id_priv->timewait_info->inserted_remote_id = 0;
1933 spin_unlock(&cm.lock);
1934 spin_unlock_irq(&cm_id_priv->lock);
1935 cm_issue_rej(work->port, work->mad_recv_wc,
1936 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
1941 spin_unlock(&cm.lock);
1943 cm_id_priv->id.state = IB_CM_REP_RCVD;
1944 cm_id_priv->id.remote_id = rep_msg->local_comm_id;
1945 cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
1946 cm_id_priv->initiator_depth = rep_msg->resp_resources;
1947 cm_id_priv->responder_resources = rep_msg->initiator_depth;
1948 cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
1949 cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1950 cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
1951 cm_id_priv->av.timeout =
1952 cm_ack_timeout(cm_id_priv->target_ack_delay,
1953 cm_id_priv->av.timeout - 1);
1954 cm_id_priv->alt_av.timeout =
1955 cm_ack_timeout(cm_id_priv->target_ack_delay,
1956 cm_id_priv->alt_av.timeout - 1);
1958 /* todo: handle peer_to_peer */
1960 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1961 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1963 list_add_tail(&work->list, &cm_id_priv->work_list);
1964 spin_unlock_irq(&cm_id_priv->lock);
1967 cm_process_work(cm_id_priv, work);
1969 cm_deref_id(cm_id_priv);
1973 cm_deref_id(cm_id_priv);
1977 static int cm_establish_handler(struct cm_work *work)
1979 struct cm_id_private *cm_id_priv;
1982 /* See comment in cm_establish about lookup. */
1983 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
1987 spin_lock_irq(&cm_id_priv->lock);
1988 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
1989 spin_unlock_irq(&cm_id_priv->lock);
1993 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1994 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1996 list_add_tail(&work->list, &cm_id_priv->work_list);
1997 spin_unlock_irq(&cm_id_priv->lock);
2000 cm_process_work(cm_id_priv, work);
2002 cm_deref_id(cm_id_priv);
2005 cm_deref_id(cm_id_priv);
2009 static int cm_rtu_handler(struct cm_work *work)
2011 struct cm_id_private *cm_id_priv;
2012 struct cm_rtu_msg *rtu_msg;
2015 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
2016 cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
2017 rtu_msg->local_comm_id);
2021 work->cm_event.private_data = &rtu_msg->private_data;
2023 spin_lock_irq(&cm_id_priv->lock);
2024 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
2025 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
2026 spin_unlock_irq(&cm_id_priv->lock);
2027 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2028 counter[CM_RTU_COUNTER]);
2031 cm_id_priv->id.state = IB_CM_ESTABLISHED;
2033 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2034 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2036 list_add_tail(&work->list, &cm_id_priv->work_list);
2037 spin_unlock_irq(&cm_id_priv->lock);
2040 cm_process_work(cm_id_priv, work);
2042 cm_deref_id(cm_id_priv);
2045 cm_deref_id(cm_id_priv);
2049 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2050 struct cm_id_private *cm_id_priv,
2051 const void *private_data,
2052 u8 private_data_len)
2054 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2055 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
2056 dreq_msg->local_comm_id = cm_id_priv->id.local_id;
2057 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
2058 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
2060 if (private_data && private_data_len)
2061 memcpy(dreq_msg->private_data, private_data, private_data_len);
2064 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
2065 const void *private_data,
2066 u8 private_data_len)
2068 struct cm_id_private *cm_id_priv;
2069 struct ib_mad_send_buf *msg;
2070 unsigned long flags;
2073 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2076 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2077 spin_lock_irqsave(&cm_id_priv->lock, flags);
2078 if (cm_id->state != IB_CM_ESTABLISHED) {
2083 if (cm_id->lap_state == IB_CM_LAP_SENT ||
2084 cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
2085 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2087 ret = cm_alloc_msg(cm_id_priv, &msg);
2089 cm_enter_timewait(cm_id_priv);
2093 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2094 private_data, private_data_len);
2095 msg->timeout_ms = cm_id_priv->timeout_ms;
2096 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2098 ret = ib_post_send_mad(msg, NULL);
2100 cm_enter_timewait(cm_id_priv);
2101 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2106 cm_id->state = IB_CM_DREQ_SENT;
2107 cm_id_priv->msg = msg;
2108 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2111 EXPORT_SYMBOL(ib_send_cm_dreq);
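/*
 * Usage sketch (illustrative only, compiled out): how a consumer might
 * initiate a disconnect on an established connection. The DREP (or a
 * timeout into timewait) is then reported through the cm_handler.
 * Private data is optional; NULL/0 means none.
 */
#if 0
static void example_start_disconnect(struct ib_cm_id *cm_id)
{
	if (ib_send_cm_dreq(cm_id, NULL, 0))
		pr_debug("DREQ not sent; connection not ESTABLISHED?\n");
}
#endif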
2113 static void cm_format_drep(struct cm_drep_msg *drep_msg,
2114 struct cm_id_private *cm_id_priv,
2115 const void *private_data,
2116 u8 private_data_len)
2118 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2119 drep_msg->local_comm_id = cm_id_priv->id.local_id;
2120 drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
2122 if (private_data && private_data_len)
2123 memcpy(drep_msg->private_data, private_data, private_data_len);
2126 int ib_send_cm_drep(struct ib_cm_id *cm_id,
2127 const void *private_data,
2128 u8 private_data_len)
2130 struct cm_id_private *cm_id_priv;
2131 struct ib_mad_send_buf *msg;
2132 unsigned long flags;
2136 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2139 data = cm_copy_private_data(private_data, private_data_len);
2140 if (IS_ERR(data))
2141 return PTR_ERR(data);
2143 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2144 spin_lock_irqsave(&cm_id_priv->lock, flags);
2145 if (cm_id->state != IB_CM_DREQ_RCVD) {
2146 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2151 cm_set_private_data(cm_id_priv, data, private_data_len);
2152 cm_enter_timewait(cm_id_priv);
2154 ret = cm_alloc_msg(cm_id_priv, &msg);
2158 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2159 private_data, private_data_len);
2161 ret = ib_post_send_mad(msg, NULL);
2163 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2168 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2171 EXPORT_SYMBOL(ib_send_cm_drep);
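/*
 * Usage sketch (illustrative only, compiled out): the passive side of a
 * disconnect answers IB_CM_DREQ_RECEIVED with a DREP from its
 * cm_handler, which moves the id into timewait. Hypothetical handler
 * fragment.
 */
#if 0
static int example_cm_handler(struct ib_cm_id *cm_id,
			      struct ib_cm_event *event)
{
	if (event->event == IB_CM_DREQ_RECEIVED)
		return ib_send_cm_drep(cm_id, NULL, 0);
	return 0;
}
#endif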
2173 static int cm_issue_drep(struct cm_port *port,
2174 struct ib_mad_recv_wc *mad_recv_wc)
2176 struct ib_mad_send_buf *msg = NULL;
2177 struct cm_dreq_msg *dreq_msg;
2178 struct cm_drep_msg *drep_msg;
2181 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2185 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2186 drep_msg = (struct cm_drep_msg *) msg->mad;
2188 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2189 drep_msg->remote_comm_id = dreq_msg->local_comm_id;
2190 drep_msg->local_comm_id = dreq_msg->remote_comm_id;
2192 ret = ib_post_send_mad(msg, NULL);
2199 static int cm_dreq_handler(struct cm_work *work)
2201 struct cm_id_private *cm_id_priv;
2202 struct cm_dreq_msg *dreq_msg;
2203 struct ib_mad_send_buf *msg = NULL;
2206 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2207 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
2208 dreq_msg->local_comm_id);
2209 if (!cm_id_priv) {
2210 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2211 counter[CM_DREQ_COUNTER]);
2212 cm_issue_drep(work->port, work->mad_recv_wc);
2213 return -EINVAL;
2214 }
2216 work->cm_event.private_data = &dreq_msg->private_data;
2218 spin_lock_irq(&cm_id_priv->lock);
2219 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
2222 switch (cm_id_priv->id.state) {
2223 case IB_CM_REP_SENT:
2224 case IB_CM_DREQ_SENT:
2225 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2227 case IB_CM_ESTABLISHED:
2228 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2229 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2230 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2232 case IB_CM_MRA_REP_RCVD:
2234 case IB_CM_TIMEWAIT:
2235 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2236 counter[CM_DREQ_COUNTER]);
2237 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2240 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2241 cm_id_priv->private_data,
2242 cm_id_priv->private_data_len);
2243 spin_unlock_irq(&cm_id_priv->lock);
2245 if (ib_post_send_mad(msg, NULL))
2248 case IB_CM_DREQ_RCVD:
2249 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2250 counter[CM_DREQ_COUNTER]);
2255 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2256 cm_id_priv->tid = dreq_msg->hdr.tid;
2257 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2259 list_add_tail(&work->list, &cm_id_priv->work_list);
2260 spin_unlock_irq(&cm_id_priv->lock);
2263 cm_process_work(cm_id_priv, work);
2265 cm_deref_id(cm_id_priv);
2268 unlock: spin_unlock_irq(&cm_id_priv->lock);
2269 deref: cm_deref_id(cm_id_priv);
2273 static int cm_drep_handler(struct cm_work *work)
2275 struct cm_id_private *cm_id_priv;
2276 struct cm_drep_msg *drep_msg;
2279 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2280 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
2281 drep_msg->local_comm_id);
2282 if (!cm_id_priv)
2283 return -EINVAL;
2285 work->cm_event.private_data = &drep_msg->private_data;
2287 spin_lock_irq(&cm_id_priv->lock);
2288 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2289 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2290 spin_unlock_irq(&cm_id_priv->lock);
2293 cm_enter_timewait(cm_id_priv);
2295 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2296 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2298 list_add_tail(&work->list, &cm_id_priv->work_list);
2299 spin_unlock_irq(&cm_id_priv->lock);
2302 cm_process_work(cm_id_priv, work);
2304 cm_deref_id(cm_id_priv);
2307 cm_deref_id(cm_id_priv);
2311 int ib_send_cm_rej(struct ib_cm_id *cm_id,
2312 enum ib_cm_rej_reason reason,
2315 const void *private_data,
2316 u8 private_data_len)
2318 struct cm_id_private *cm_id_priv;
2319 struct ib_mad_send_buf *msg;
2320 unsigned long flags;
2323 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2324 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2327 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2329 spin_lock_irqsave(&cm_id_priv->lock, flags);
2330 switch (cm_id->state) {
2331 case IB_CM_REQ_SENT:
2332 case IB_CM_MRA_REQ_RCVD:
2333 case IB_CM_REQ_RCVD:
2334 case IB_CM_MRA_REQ_SENT:
2335 case IB_CM_REP_RCVD:
2336 case IB_CM_MRA_REP_SENT:
2337 ret = cm_alloc_msg(cm_id_priv, &msg);
2339 cm_format_rej((struct cm_rej_msg *) msg->mad,
2340 cm_id_priv, reason, ari, ari_length,
2341 private_data, private_data_len);
2343 cm_reset_to_idle(cm_id_priv);
2345 case IB_CM_REP_SENT:
2346 case IB_CM_MRA_REP_RCVD:
2347 ret = cm_alloc_msg(cm_id_priv, &msg);
2349 cm_format_rej((struct cm_rej_msg *) msg->mad,
2350 cm_id_priv, reason, ari, ari_length,
2351 private_data, private_data_len);
2353 cm_enter_timewait(cm_id_priv);
2363 ret = ib_post_send_mad(msg, NULL);
2367 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2370 EXPORT_SYMBOL(ib_send_cm_rej);
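/*
 * Usage sketch (illustrative only, compiled out): rejecting a
 * connection request from the cm_handler. ARI (additional rejection
 * info) and private data are optional; IB_CM_REJ_CONSUMER_DEFINED is
 * one of the ib_cm_rej_reason values.
 */
#if 0
static int example_reject_req(struct ib_cm_id *cm_id)
{
	return ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			      NULL, 0, NULL, 0);
}
#endif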
2372 static void cm_format_rej_event(struct cm_work *work)
2374 struct cm_rej_msg *rej_msg;
2375 struct ib_cm_rej_event_param *param;
2377 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2378 param = &work->cm_event.param.rej_rcvd;
2379 param->ari = rej_msg->ari;
2380 param->ari_length = cm_rej_get_reject_info_len(rej_msg);
2381 param->reason = __be16_to_cpu(rej_msg->reason);
2382 work->cm_event.private_data = &rej_msg->private_data;
2385 static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2387 struct cm_timewait_info *timewait_info;
2388 struct cm_id_private *cm_id_priv;
2391 remote_id = rej_msg->local_comm_id;
2393 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
2394 spin_lock_irq(&cm.lock);
2395 timewait_info = cm_find_remote_id(*((__be64 *) rej_msg->ari),
2397 if (!timewait_info) {
2398 spin_unlock_irq(&cm.lock);
2401 cm_id_priv = idr_find(&cm.local_id_table, (__force int)
2402 (timewait_info->work.local_id ^
2403 cm.random_id_operand));
2405 if (cm_id_priv->id.remote_id == remote_id)
2406 atomic_inc(&cm_id_priv->refcount);
2410 spin_unlock_irq(&cm.lock);
2411 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2412 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2414 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2419 static int cm_rej_handler(struct cm_work *work)
2421 struct cm_id_private *cm_id_priv;
2422 struct cm_rej_msg *rej_msg;
2425 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2426 cm_id_priv = cm_acquire_rejected_id(rej_msg);
2427 if (!cm_id_priv)
2428 return -EINVAL;
2430 cm_format_rej_event(work);
2432 spin_lock_irq(&cm_id_priv->lock);
2433 switch (cm_id_priv->id.state) {
2434 case IB_CM_REQ_SENT:
2435 case IB_CM_MRA_REQ_RCVD:
2436 case IB_CM_REP_SENT:
2437 case IB_CM_MRA_REP_RCVD:
2438 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2440 case IB_CM_REQ_RCVD:
2441 case IB_CM_MRA_REQ_SENT:
2442 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2443 cm_enter_timewait(cm_id_priv);
2445 cm_reset_to_idle(cm_id_priv);
2447 case IB_CM_DREQ_SENT:
2448 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2450 case IB_CM_REP_RCVD:
2451 case IB_CM_MRA_REP_SENT:
2452 cm_enter_timewait(cm_id_priv);
2454 case IB_CM_ESTABLISHED:
2455 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
2456 cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
2457 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
2458 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2460 cm_enter_timewait(cm_id_priv);
2465 spin_unlock_irq(&cm_id_priv->lock);
2470 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2472 list_add_tail(&work->list, &cm_id_priv->work_list);
2473 spin_unlock_irq(&cm_id_priv->lock);
2476 cm_process_work(cm_id_priv, work);
2478 cm_deref_id(cm_id_priv);
2481 cm_deref_id(cm_id_priv);
2485 int ib_send_cm_mra(struct ib_cm_id *cm_id,
2487 const void *private_data,
2488 u8 private_data_len)
2490 struct cm_id_private *cm_id_priv;
2491 struct ib_mad_send_buf *msg;
2492 enum ib_cm_state cm_state;
2493 enum ib_cm_lap_state lap_state;
2494 enum cm_msg_response msg_response;
2496 unsigned long flags;
2499 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2502 data = cm_copy_private_data(private_data, private_data_len);
2503 if (IS_ERR(data))
2504 return PTR_ERR(data);
2506 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2508 spin_lock_irqsave(&cm_id_priv->lock, flags);
2509 switch (cm_id_priv->id.state) {
2510 case IB_CM_REQ_RCVD:
2511 cm_state = IB_CM_MRA_REQ_SENT;
2512 lap_state = cm_id->lap_state;
2513 msg_response = CM_MSG_RESPONSE_REQ;
2515 case IB_CM_REP_RCVD:
2516 cm_state = IB_CM_MRA_REP_SENT;
2517 lap_state = cm_id->lap_state;
2518 msg_response = CM_MSG_RESPONSE_REP;
2520 case IB_CM_ESTABLISHED:
2521 if (cm_id->lap_state == IB_CM_LAP_RCVD) {
2522 cm_state = cm_id->state;
2523 lap_state = IB_CM_MRA_LAP_SENT;
2524 msg_response = CM_MSG_RESPONSE_OTHER;
2532 if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
2533 ret = cm_alloc_msg(cm_id_priv, &msg);
2537 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2538 msg_response, service_timeout,
2539 private_data, private_data_len);
2540 ret = ib_post_send_mad(msg, NULL);
2545 cm_id->state = cm_state;
2546 cm_id->lap_state = lap_state;
2547 cm_id_priv->service_timeout = service_timeout;
2548 cm_set_private_data(cm_id_priv, data, private_data_len);
2549 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2552 error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2556 error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2561 EXPORT_SYMBOL(ib_send_cm_mra);
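/*
 * Usage sketch (illustrative only, compiled out): asking the peer for
 * more time before replying to a REQ. The 5-bit service timeout (16
 * here is an arbitrary example value) uses the CM's usual log2 time
 * encoding; OR in IB_CM_MRA_FLAG_DELAY to queue the MRA and send it
 * only if a duplicate message arrives.
 */
#if 0
static int example_delay_reply(struct ib_cm_id *cm_id)
{
	return ib_send_cm_mra(cm_id, 16 | IB_CM_MRA_FLAG_DELAY, NULL, 0);
}
#endif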
2563 static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2565 switch (cm_mra_get_msg_mraed(mra_msg)) {
2566 case CM_MSG_RESPONSE_REQ:
2567 return cm_acquire_id(mra_msg->remote_comm_id, 0);
2568 case CM_MSG_RESPONSE_REP:
2569 case CM_MSG_RESPONSE_OTHER:
2570 return cm_acquire_id(mra_msg->remote_comm_id,
2571 mra_msg->local_comm_id);
2577 static int cm_mra_handler(struct cm_work *work)
2579 struct cm_id_private *cm_id_priv;
2580 struct cm_mra_msg *mra_msg;
2583 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2584 cm_id_priv = cm_acquire_mraed_id(mra_msg);
2585 if (!cm_id_priv)
2586 return -EINVAL;
2588 work->cm_event.private_data = &mra_msg->private_data;
2589 work->cm_event.param.mra_rcvd.service_timeout =
2590 cm_mra_get_service_timeout(mra_msg);
2591 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2592 cm_convert_to_ms(cm_id_priv->av.timeout);
2594 spin_lock_irq(&cm_id_priv->lock);
2595 switch (cm_id_priv->id.state) {
2596 case IB_CM_REQ_SENT:
2597 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2598 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2599 cm_id_priv->msg, timeout))
2601 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2603 case IB_CM_REP_SENT:
2604 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2605 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2606 cm_id_priv->msg, timeout))
2608 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2610 case IB_CM_ESTABLISHED:
2611 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2612 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2613 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2614 cm_id_priv->msg, timeout)) {
2615 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2616 atomic_long_inc(&work->port->
2617 counter_group[CM_RECV_DUPLICATES].
2618 counter[CM_MRA_COUNTER]);
2621 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2623 case IB_CM_MRA_REQ_RCVD:
2624 case IB_CM_MRA_REP_RCVD:
2625 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2626 counter[CM_MRA_COUNTER]);
2632 cm_id_priv->msg->context[1] = (void *) (unsigned long)
2633 cm_id_priv->id.state;
2634 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2636 list_add_tail(&work->list, &cm_id_priv->work_list);
2637 spin_unlock_irq(&cm_id_priv->lock);
2640 cm_process_work(cm_id_priv, work);
2642 cm_deref_id(cm_id_priv);
2645 spin_unlock_irq(&cm_id_priv->lock);
2646 cm_deref_id(cm_id_priv);
2650 static void cm_format_lap(struct cm_lap_msg *lap_msg,
2651 struct cm_id_private *cm_id_priv,
2652 struct ib_sa_path_rec *alternate_path,
2653 const void *private_data,
2654 u8 private_data_len)
2656 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2657 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2658 lap_msg->local_comm_id = cm_id_priv->id.local_id;
2659 lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2660 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2661 /* todo: need remote CM response timeout */
2662 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2663 lap_msg->alt_local_lid = alternate_path->slid;
2664 lap_msg->alt_remote_lid = alternate_path->dlid;
2665 lap_msg->alt_local_gid = alternate_path->sgid;
2666 lap_msg->alt_remote_gid = alternate_path->dgid;
2667 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2668 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2669 lap_msg->alt_hop_limit = alternate_path->hop_limit;
2670 cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2671 cm_lap_set_sl(lap_msg, alternate_path->sl);
2672 cm_lap_set_subnet_local(lap_msg, 1); /* only subnet-local alternate paths are supported */
2673 cm_lap_set_local_ack_timeout(lap_msg,
2674 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
2675 alternate_path->packet_life_time));
2677 if (private_data && private_data_len)
2678 memcpy(lap_msg->private_data, private_data, private_data_len);
2681 int ib_send_cm_lap(struct ib_cm_id *cm_id,
2682 struct ib_sa_path_rec *alternate_path,
2683 const void *private_data,
2684 u8 private_data_len)
2686 struct cm_id_private *cm_id_priv;
2687 struct ib_mad_send_buf *msg;
2688 unsigned long flags;
2691 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2694 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2695 spin_lock_irqsave(&cm_id_priv->lock, flags);
2696 if (cm_id->state != IB_CM_ESTABLISHED ||
2697 (cm_id->lap_state != IB_CM_LAP_UNINIT &&
2698 cm_id->lap_state != IB_CM_LAP_IDLE)) {
2703 ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
2706 cm_id_priv->alt_av.timeout =
2707 cm_ack_timeout(cm_id_priv->target_ack_delay,
2708 cm_id_priv->alt_av.timeout - 1);
2710 ret = cm_alloc_msg(cm_id_priv, &msg);
2714 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
2715 alternate_path, private_data, private_data_len);
2716 msg->timeout_ms = cm_id_priv->timeout_ms;
2717 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
2719 ret = ib_post_send_mad(msg, NULL);
2721 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2726 cm_id->lap_state = IB_CM_LAP_SENT;
2727 cm_id_priv->msg = msg;
2729 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2732 EXPORT_SYMBOL(ib_send_cm_lap);
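/*
 * Usage sketch (illustrative only, compiled out): the active side loads
 * an alternate path on an established connection. alt_path would come
 * from an SA path record query; it is hypothetical here.
 */
#if 0
static int example_load_alt_path(struct ib_cm_id *cm_id,
				 struct ib_sa_path_rec *alt_path)
{
	/* Fails unless ESTABLISHED with lap_state UNINIT or IDLE. */
	return ib_send_cm_lap(cm_id, alt_path, NULL, 0);
}
#endif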
2734 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
2735 struct ib_sa_path_rec *path,
2736 struct cm_lap_msg *lap_msg)
2738 memset(path, 0, sizeof *path);
2739 path->dgid = lap_msg->alt_local_gid;
2740 path->sgid = lap_msg->alt_remote_gid;
2741 path->dlid = lap_msg->alt_local_lid;
2742 path->slid = lap_msg->alt_remote_lid;
2743 path->flow_label = cm_lap_get_flow_label(lap_msg);
2744 path->hop_limit = lap_msg->alt_hop_limit;
2745 path->traffic_class = cm_lap_get_traffic_class(lap_msg);
2746 path->reversible = 1;
2747 path->pkey = cm_id_priv->pkey;
2748 path->sl = cm_lap_get_sl(lap_msg);
2749 path->mtu_selector = IB_SA_EQ;
2750 path->mtu = cm_id_priv->path_mtu;
2751 path->rate_selector = IB_SA_EQ;
2752 path->rate = cm_lap_get_packet_rate(lap_msg);
2753 path->packet_life_time_selector = IB_SA_EQ;
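/*
 * The LAP carries a local ACK timeout, which is packet life time + 1
 * in log2 units; subtract 1 (guarding against underflow) to recover
 * the one-way packet life time below.
 */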
2754 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
2755 path->packet_life_time -= (path->packet_life_time > 0);
2758 static int cm_lap_handler(struct cm_work *work)
2760 struct cm_id_private *cm_id_priv;
2761 struct cm_lap_msg *lap_msg;
2762 struct ib_cm_lap_event_param *param;
2763 struct ib_mad_send_buf *msg = NULL;
2766 /* todo: verify LAP request and send reject APR if invalid. */
2767 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
2768 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
2769 lap_msg->local_comm_id);
2770 if (!cm_id_priv)
2771 return -EINVAL;
2773 param = &work->cm_event.param.lap_rcvd;
2774 param->alternate_path = &work->path[0];
2775 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
2776 work->cm_event.private_data = &lap_msg->private_data;
2778 spin_lock_irq(&cm_id_priv->lock);
2779 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
2782 switch (cm_id_priv->id.lap_state) {
2783 case IB_CM_LAP_UNINIT:
2784 case IB_CM_LAP_IDLE:
2786 case IB_CM_MRA_LAP_SENT:
2787 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2788 counter[CM_LAP_COUNTER]);
2789 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2792 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2793 CM_MSG_RESPONSE_OTHER,
2794 cm_id_priv->service_timeout,
2795 cm_id_priv->private_data,
2796 cm_id_priv->private_data_len);
2797 spin_unlock_irq(&cm_id_priv->lock);
2799 if (ib_post_send_mad(msg, NULL))
2802 case IB_CM_LAP_RCVD:
2803 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2804 counter[CM_LAP_COUNTER]);
2810 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
2811 cm_id_priv->tid = lap_msg->hdr.tid;
2812 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2813 work->mad_recv_wc->recv_buf.grh,
2815 cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
2816 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2818 list_add_tail(&work->list, &cm_id_priv->work_list);
2819 spin_unlock_irq(&cm_id_priv->lock);
2822 cm_process_work(cm_id_priv, work);
2824 cm_deref_id(cm_id_priv);
2827 unlock: spin_unlock_irq(&cm_id_priv->lock);
2828 deref: cm_deref_id(cm_id_priv);
2832 static void cm_format_apr(struct cm_apr_msg *apr_msg,
2833 struct cm_id_private *cm_id_priv,
2834 enum ib_cm_apr_status status,
2837 const void *private_data,
2838 u8 private_data_len)
2840 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
2841 apr_msg->local_comm_id = cm_id_priv->id.local_id;
2842 apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
2843 apr_msg->ap_status = (u8) status;
2845 if (info && info_length) {
2846 apr_msg->info_length = info_length;
2847 memcpy(apr_msg->info, info, info_length);
2850 if (private_data && private_data_len)
2851 memcpy(apr_msg->private_data, private_data, private_data_len);
2854 int ib_send_cm_apr(struct ib_cm_id *cm_id,
2855 enum ib_cm_apr_status status,
2858 const void *private_data,
2859 u8 private_data_len)
2861 struct cm_id_private *cm_id_priv;
2862 struct ib_mad_send_buf *msg;
2863 unsigned long flags;
2866 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
2867 (info && info_length > IB_CM_APR_INFO_LENGTH))
2870 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2871 spin_lock_irqsave(&cm_id_priv->lock, flags);
2872 if (cm_id->state != IB_CM_ESTABLISHED ||
2873 (cm_id->lap_state != IB_CM_LAP_RCVD &&
2874 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
2879 ret = cm_alloc_msg(cm_id_priv, &msg);
2883 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
2884 info, info_length, private_data, private_data_len);
2885 ret = ib_post_send_mad(msg, NULL);
2887 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2892 cm_id->lap_state = IB_CM_LAP_IDLE;
2893 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2896 EXPORT_SYMBOL(ib_send_cm_apr);
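/*
 * Usage sketch (illustrative only, compiled out): the passive side
 * accepts a received LAP by sending an APR with IB_CM_APR_SUCCESS;
 * other ib_cm_apr_status values reject the proposed alternate path.
 */
#if 0
static int example_accept_alt_path(struct ib_cm_id *cm_id)
{
	return ib_send_cm_apr(cm_id, IB_CM_APR_SUCCESS, NULL, 0, NULL, 0);
}
#endif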
2898 static int cm_apr_handler(struct cm_work *work)
2900 struct cm_id_private *cm_id_priv;
2901 struct cm_apr_msg *apr_msg;
2904 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
2905 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
2906 apr_msg->local_comm_id);
2907 if (!cm_id_priv)
2908 return -EINVAL; /* Unmatched reply. */
2910 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
2911 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
2912 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
2913 work->cm_event.private_data = &apr_msg->private_data;
2915 spin_lock_irq(&cm_id_priv->lock);
2916 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
2917 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
2918 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
2919 spin_unlock_irq(&cm_id_priv->lock);
2922 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
2923 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2924 cm_id_priv->msg = NULL;
2926 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2928 list_add_tail(&work->list, &cm_id_priv->work_list);
2929 spin_unlock_irq(&cm_id_priv->lock);
2932 cm_process_work(cm_id_priv, work);
2934 cm_deref_id(cm_id_priv);
2937 cm_deref_id(cm_id_priv);
2941 static int cm_timewait_handler(struct cm_work *work)
2943 struct cm_timewait_info *timewait_info;
2944 struct cm_id_private *cm_id_priv;
2947 timewait_info = (struct cm_timewait_info *)work;
2948 spin_lock_irq(&cm.lock);
2949 list_del(&timewait_info->list);
2950 spin_unlock_irq(&cm.lock);
2952 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2953 timewait_info->work.remote_id);
2954 if (!cm_id_priv)
2955 return -EINVAL;
2957 spin_lock_irq(&cm_id_priv->lock);
2958 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
2959 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
2960 spin_unlock_irq(&cm_id_priv->lock);
2963 cm_id_priv->id.state = IB_CM_IDLE;
2964 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2966 list_add_tail(&work->list, &cm_id_priv->work_list);
2967 spin_unlock_irq(&cm_id_priv->lock);
2970 cm_process_work(cm_id_priv, work);
2972 cm_deref_id(cm_id_priv);
2975 cm_deref_id(cm_id_priv);
2979 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
2980 struct cm_id_private *cm_id_priv,
2981 struct ib_cm_sidr_req_param *param)
2983 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
2984 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
2985 sidr_req_msg->request_id = cm_id_priv->id.local_id;
2986 sidr_req_msg->pkey = param->path->pkey;
2987 sidr_req_msg->service_id = param->service_id;
2989 if (param->private_data && param->private_data_len)
2990 memcpy(sidr_req_msg->private_data, param->private_data,
2991 param->private_data_len);
2994 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
2995 struct ib_cm_sidr_req_param *param)
2997 struct cm_id_private *cm_id_priv;
2998 struct ib_mad_send_buf *msg;
2999 unsigned long flags;
3002 if (!param->path || (param->private_data &&
3003 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3006 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3007 ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
3011 cm_id->service_id = param->service_id;
3012 cm_id->service_mask = ~cpu_to_be64(0);
3013 cm_id_priv->timeout_ms = param->timeout_ms;
3014 cm_id_priv->max_cm_retries = param->max_cm_retries;
3015 ret = cm_alloc_msg(cm_id_priv, &msg);
3019 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
3021 msg->timeout_ms = cm_id_priv->timeout_ms;
3022 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
3024 spin_lock_irqsave(&cm_id_priv->lock, flags);
3025 if (cm_id->state == IB_CM_IDLE)
3026 ret = ib_post_send_mad(msg, NULL);
3031 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3035 cm_id->state = IB_CM_SIDR_REQ_SENT;
3036 cm_id_priv->msg = msg;
3037 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3041 EXPORT_SYMBOL(ib_send_cm_sidr_req);
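/*
 * Usage sketch (illustrative only, compiled out): resolving a service
 * ID to a QPN/Q_Key with a SIDR REQ. The path, service_id, and timeout
 * values are hypothetical; the reply arrives as
 * IB_CM_SIDR_REP_RECEIVED in the cm_handler.
 */
#if 0
static int example_sidr_resolve(struct ib_cm_id *cm_id,
				struct ib_sa_path_rec *path,
				__be64 service_id)
{
	struct ib_cm_sidr_req_param param = {
		.path		= path,
		.service_id	= service_id,
		.timeout_ms	= 1000,
		.max_cm_retries	= 3,
	};

	return ib_send_cm_sidr_req(cm_id, &param);
}
#endif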
3043 static void cm_format_sidr_req_event(struct cm_work *work,
3044 struct ib_cm_id *listen_id)
3046 struct cm_sidr_req_msg *sidr_req_msg;
3047 struct ib_cm_sidr_req_event_param *param;
3049 sidr_req_msg = (struct cm_sidr_req_msg *)
3050 work->mad_recv_wc->recv_buf.mad;
3051 param = &work->cm_event.param.sidr_req_rcvd;
3052 param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
3053 param->listen_id = listen_id;
3054 param->service_id = sidr_req_msg->service_id;
3055 param->bth_pkey = cm_get_bth_pkey(work);
3056 param->port = work->port->port_num;
3057 work->cm_event.private_data = &sidr_req_msg->private_data;
3060 static int cm_sidr_req_handler(struct cm_work *work)
3062 struct ib_cm_id *cm_id;
3063 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
3064 struct cm_sidr_req_msg *sidr_req_msg;
3067 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
3068 if (IS_ERR(cm_id))
3069 return PTR_ERR(cm_id);
3070 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3072 /* Record SGID/SLID and request ID for lookup. */
3073 sidr_req_msg = (struct cm_sidr_req_msg *)
3074 work->mad_recv_wc->recv_buf.mad;
3075 wc = work->mad_recv_wc->wc;
3076 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
3077 cm_id_priv->av.dgid.global.interface_id = 0;
3078 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3079 work->mad_recv_wc->recv_buf.grh,
3081 cm_id_priv->id.remote_id = sidr_req_msg->request_id;
3082 cm_id_priv->tid = sidr_req_msg->hdr.tid;
3083 atomic_inc(&cm_id_priv->work_count);
3085 spin_lock_irq(&cm.lock);
3086 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3087 if (cur_cm_id_priv) {
3088 spin_unlock_irq(&cm.lock);
3089 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3090 counter[CM_SIDR_REQ_COUNTER]);
3091 goto out; /* Duplicate message. */
3093 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3094 cur_cm_id_priv = cm_find_listen(cm_id->device,
3095 sidr_req_msg->service_id);
3096 if (!cur_cm_id_priv) {
3097 spin_unlock_irq(&cm.lock);
3098 cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
3099 goto out; /* No match. */
3101 atomic_inc(&cur_cm_id_priv->refcount);
3102 atomic_inc(&cm_id_priv->refcount);
3103 spin_unlock_irq(&cm.lock);
3105 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
3106 cm_id_priv->id.context = cur_cm_id_priv->id.context;
3107 cm_id_priv->id.service_id = sidr_req_msg->service_id;
3108 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3110 cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
3111 cm_process_work(cm_id_priv, work);
3112 cm_deref_id(cur_cm_id_priv);
3115 ib_destroy_cm_id(&cm_id_priv->id);
3119 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3120 struct cm_id_private *cm_id_priv,
3121 struct ib_cm_sidr_rep_param *param)
3123 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3125 sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
3126 sidr_rep_msg->status = param->status;
3127 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
3128 sidr_rep_msg->service_id = cm_id_priv->id.service_id;
3129 sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
3131 if (param->info && param->info_length)
3132 memcpy(sidr_rep_msg->info, param->info, param->info_length);
3134 if (param->private_data && param->private_data_len)
3135 memcpy(sidr_rep_msg->private_data, param->private_data,
3136 param->private_data_len);
3139 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3140 struct ib_cm_sidr_rep_param *param)
3142 struct cm_id_private *cm_id_priv;
3143 struct ib_mad_send_buf *msg;
3144 unsigned long flags;
3147 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3148 (param->private_data &&
3149 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
3152 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3153 spin_lock_irqsave(&cm_id_priv->lock, flags);
3154 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
3159 ret = cm_alloc_msg(cm_id_priv, &msg);
3163 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
3165 ret = ib_post_send_mad(msg, NULL);
3167 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3171 cm_id->state = IB_CM_IDLE;
3172 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3174 spin_lock_irqsave(&cm.lock, flags);
3175 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3176 spin_unlock_irqrestore(&cm.lock, flags);
3179 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3182 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
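/*
 * Usage sketch (illustrative only, compiled out): answering
 * IB_CM_SIDR_REQ_RECEIVED with the QPN and Q_Key of the service's UD
 * queue pair. The qpn/qkey values are hypothetical.
 */
#if 0
static int example_sidr_answer(struct ib_cm_id *cm_id, u32 qpn, u32 qkey)
{
	struct ib_cm_sidr_rep_param param = {
		.qp_num	= qpn,
		.qkey	= qkey,
		.status	= IB_SIDR_SUCCESS,
	};

	return ib_send_cm_sidr_rep(cm_id, &param);
}
#endif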
3184 static void cm_format_sidr_rep_event(struct cm_work *work)
3186 struct cm_sidr_rep_msg *sidr_rep_msg;
3187 struct ib_cm_sidr_rep_event_param *param;
3189 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3190 work->mad_recv_wc->recv_buf.mad;
3191 param = &work->cm_event.param.sidr_rep_rcvd;
3192 param->status = sidr_rep_msg->status;
3193 param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
3194 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
3195 param->info = &sidr_rep_msg->info;
3196 param->info_len = sidr_rep_msg->info_length;
3197 work->cm_event.private_data = &sidr_rep_msg->private_data;
3200 static int cm_sidr_rep_handler(struct cm_work *work)
3202 struct cm_sidr_rep_msg *sidr_rep_msg;
3203 struct cm_id_private *cm_id_priv;
3205 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3206 work->mad_recv_wc->recv_buf.mad;
3207 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
3208 if (!cm_id_priv)
3209 return -EINVAL; /* Unmatched reply. */
3211 spin_lock_irq(&cm_id_priv->lock);
3212 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
3213 spin_unlock_irq(&cm_id_priv->lock);
3216 cm_id_priv->id.state = IB_CM_IDLE;
3217 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3218 spin_unlock_irq(&cm_id_priv->lock);
3220 cm_format_sidr_rep_event(work);
3221 cm_process_work(cm_id_priv, work);
3224 cm_deref_id(cm_id_priv);
3228 static void cm_process_send_error(struct ib_mad_send_buf *msg,
3229 enum ib_wc_status wc_status)
3231 struct cm_id_private *cm_id_priv;
3232 struct ib_cm_event cm_event;
3233 enum ib_cm_state state;
3236 memset(&cm_event, 0, sizeof cm_event);
3237 cm_id_priv = msg->context[0];
3239 /* Discard old sends or ones without a response. */
3240 spin_lock_irq(&cm_id_priv->lock);
3241 state = (enum ib_cm_state) (unsigned long) msg->context[1];
3242 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
3246 case IB_CM_REQ_SENT:
3247 case IB_CM_MRA_REQ_RCVD:
3248 cm_reset_to_idle(cm_id_priv);
3249 cm_event.event = IB_CM_REQ_ERROR;
3251 case IB_CM_REP_SENT:
3252 case IB_CM_MRA_REP_RCVD:
3253 cm_reset_to_idle(cm_id_priv);
3254 cm_event.event = IB_CM_REP_ERROR;
3256 case IB_CM_DREQ_SENT:
3257 cm_enter_timewait(cm_id_priv);
3258 cm_event.event = IB_CM_DREQ_ERROR;
3260 case IB_CM_SIDR_REQ_SENT:
3261 cm_id_priv->id.state = IB_CM_IDLE;
3262 cm_event.event = IB_CM_SIDR_REQ_ERROR;
3267 spin_unlock_irq(&cm_id_priv->lock);
3268 cm_event.param.send_status = wc_status;
3270 /* No other events can occur on the cm_id at this point. */
3271 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3274 ib_destroy_cm_id(&cm_id_priv->id);
3277 spin_unlock_irq(&cm_id_priv->lock);
3281 static void cm_send_handler(struct ib_mad_agent *mad_agent,
3282 struct ib_mad_send_wc *mad_send_wc)
3284 struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3285 struct cm_port *port;
3288 port = mad_agent->context;
3289 attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3290 msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3293 * If the send was in response to a received message (context[0] is not
3294 * set to a cm_id), and is not a REJ, then it is a send that was
3295 * manually retried.
3296 */
3297 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
3300 atomic_long_add(1 + msg->retries,
3301 &port->counter_group[CM_XMIT].counter[attr_index]);
3303 atomic_long_add(msg->retries,
3304 &port->counter_group[CM_XMIT_RETRIES].
3305 counter[attr_index]);
3307 switch (mad_send_wc->status) {
3309 case IB_WC_WR_FLUSH_ERR:
3313 if (msg->context[0] && msg->context[1])
3314 cm_process_send_error(msg, mad_send_wc->status);
3321 static void cm_work_handler(struct work_struct *_work)
3323 struct cm_work *work = container_of(_work, struct cm_work, work.work);
3326 switch (work->cm_event.event) {
3327 case IB_CM_REQ_RECEIVED:
3328 ret = cm_req_handler(work);
3330 case IB_CM_MRA_RECEIVED:
3331 ret = cm_mra_handler(work);
3333 case IB_CM_REJ_RECEIVED:
3334 ret = cm_rej_handler(work);
3336 case IB_CM_REP_RECEIVED:
3337 ret = cm_rep_handler(work);
3339 case IB_CM_RTU_RECEIVED:
3340 ret = cm_rtu_handler(work);
3342 case IB_CM_USER_ESTABLISHED:
3343 ret = cm_establish_handler(work);
3345 case IB_CM_DREQ_RECEIVED:
3346 ret = cm_dreq_handler(work);
3348 case IB_CM_DREP_RECEIVED:
3349 ret = cm_drep_handler(work);
3351 case IB_CM_SIDR_REQ_RECEIVED:
3352 ret = cm_sidr_req_handler(work);
3354 case IB_CM_SIDR_REP_RECEIVED:
3355 ret = cm_sidr_rep_handler(work);
3357 case IB_CM_LAP_RECEIVED:
3358 ret = cm_lap_handler(work);
3360 case IB_CM_APR_RECEIVED:
3361 ret = cm_apr_handler(work);
3363 case IB_CM_TIMEWAIT_EXIT:
3364 ret = cm_timewait_handler(work);
3374 static int cm_establish(struct ib_cm_id *cm_id)
3376 struct cm_id_private *cm_id_priv;
3377 struct cm_work *work;
3378 unsigned long flags;
3380 struct cm_device *cm_dev;
3382 cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3383 if (!cm_dev)
3384 return -ENODEV;
3386 work = kmalloc(sizeof *work, GFP_ATOMIC);
3387 if (!work)
3388 return -ENOMEM;
3390 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3391 spin_lock_irqsave(&cm_id_priv->lock, flags);
3392 switch (cm_id->state) {
3394 case IB_CM_REP_SENT:
3395 case IB_CM_MRA_REP_RCVD:
3396 cm_id->state = IB_CM_ESTABLISHED;
3398 case IB_CM_ESTABLISHED:
3405 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3413 * The CM worker thread may try to destroy the cm_id before it
3414 * can execute this work item. To prevent potential deadlock,
3415 * we need to find the cm_id once we're in the context of the
3416 * worker thread, rather than holding a reference on it.
3418 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3419 work->local_id = cm_id->local_id;
3420 work->remote_id = cm_id->remote_id;
3421 work->mad_recv_wc = NULL;
3422 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3424 /* Check whether the device has started its remove_one */
3425 spin_lock_irq(&cm.lock);
3426 if (!cm_dev->going_down) {
3427 queue_delayed_work(cm.wq, &work->work, 0);
3432 spin_unlock_irq(&cm.lock);
3438 static int cm_migrate(struct ib_cm_id *cm_id)
3440 struct cm_id_private *cm_id_priv;
3441 unsigned long flags;
3444 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3445 spin_lock_irqsave(&cm_id_priv->lock, flags);
3446 if (cm_id->state == IB_CM_ESTABLISHED &&
3447 (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3448 cm_id->lap_state == IB_CM_LAP_IDLE)) {
3449 cm_id->lap_state = IB_CM_LAP_IDLE;
3450 cm_id_priv->av = cm_id_priv->alt_av;
3453 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3458 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3463 case IB_EVENT_COMM_EST:
3464 ret = cm_establish(cm_id);
3466 case IB_EVENT_PATH_MIG:
3467 ret = cm_migrate(cm_id);
3474 EXPORT_SYMBOL(ib_cm_notify);
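/*
 * Usage sketch (illustrative only, compiled out): a consumer forwards
 * QP events so the CM can observe an implicitly established connection
 * or a completed path migration. Typically called from a QP event
 * handler, as sketched here.
 */
#if 0
static void example_qp_event(struct ib_event *event, void *context)
{
	struct ib_cm_id *cm_id = context;

	if (event->event == IB_EVENT_COMM_EST ||
	    event->event == IB_EVENT_PATH_MIG)
		ib_cm_notify(cm_id, event->event);
}
#endif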
3476 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3477 struct ib_mad_recv_wc *mad_recv_wc)
3479 struct cm_port *port = mad_agent->context;
3480 struct cm_work *work;
3481 enum ib_cm_event_type event;
3486 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
3487 case CM_REQ_ATTR_ID:
3488 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
3489 alt_local_lid != 0);
3490 event = IB_CM_REQ_RECEIVED;
3492 case CM_MRA_ATTR_ID:
3493 event = IB_CM_MRA_RECEIVED;
3495 case CM_REJ_ATTR_ID:
3496 event = IB_CM_REJ_RECEIVED;
3498 case CM_REP_ATTR_ID:
3499 event = IB_CM_REP_RECEIVED;
3501 case CM_RTU_ATTR_ID:
3502 event = IB_CM_RTU_RECEIVED;
3504 case CM_DREQ_ATTR_ID:
3505 event = IB_CM_DREQ_RECEIVED;
3507 case CM_DREP_ATTR_ID:
3508 event = IB_CM_DREP_RECEIVED;
3510 case CM_SIDR_REQ_ATTR_ID:
3511 event = IB_CM_SIDR_REQ_RECEIVED;
3513 case CM_SIDR_REP_ATTR_ID:
3514 event = IB_CM_SIDR_REP_RECEIVED;
3516 case CM_LAP_ATTR_ID:
3518 event = IB_CM_LAP_RECEIVED;
3520 case CM_APR_ATTR_ID:
3521 event = IB_CM_APR_RECEIVED;
3524 ib_free_recv_mad(mad_recv_wc);
3528 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
3529 atomic_long_inc(&port->counter_group[CM_RECV].
3530 counter[attr_id - CM_ATTR_ID_OFFSET]);
3532 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
3533 GFP_KERNEL);
3534 if (!work) {
3535 ib_free_recv_mad(mad_recv_wc);
3536 return;
3537 }
3539 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3540 work->cm_event.event = event;
3541 work->mad_recv_wc = mad_recv_wc;
3544 /* Check whether the device has started its remove_one */
3545 spin_lock_irq(&cm.lock);
3546 if (!port->cm_dev->going_down)
3547 queue_delayed_work(cm.wq, &work->work, 0);
3550 spin_unlock_irq(&cm.lock);
3554 ib_free_recv_mad(mad_recv_wc);
3558 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3559 struct ib_qp_attr *qp_attr,
3562 unsigned long flags;
3565 spin_lock_irqsave(&cm_id_priv->lock, flags);
3566 switch (cm_id_priv->id.state) {
3567 case IB_CM_REQ_SENT:
3568 case IB_CM_MRA_REQ_RCVD:
3569 case IB_CM_REQ_RCVD:
3570 case IB_CM_MRA_REQ_SENT:
3571 case IB_CM_REP_RCVD:
3572 case IB_CM_MRA_REP_SENT:
3573 case IB_CM_REP_SENT:
3574 case IB_CM_MRA_REP_RCVD:
3575 case IB_CM_ESTABLISHED:
3576 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3577 IB_QP_PKEY_INDEX | IB_QP_PORT;
3578 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
3579 if (cm_id_priv->responder_resources)
3580 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
3581 IB_ACCESS_REMOTE_ATOMIC;
3582 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3583 qp_attr->port_num = cm_id_priv->av.port->port_num;
3590 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3594 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3595 struct ib_qp_attr *qp_attr,
3598 unsigned long flags;
3601 spin_lock_irqsave(&cm_id_priv->lock, flags);
3602 switch (cm_id_priv->id.state) {
3603 case IB_CM_REQ_RCVD:
3604 case IB_CM_MRA_REQ_SENT:
3605 case IB_CM_REP_RCVD:
3606 case IB_CM_MRA_REP_SENT:
3607 case IB_CM_REP_SENT:
3608 case IB_CM_MRA_REP_RCVD:
3609 case IB_CM_ESTABLISHED:
3610 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3611 IB_QP_DEST_QPN | IB_QP_RQ_PSN;
3612 qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3613 if (!cm_id_priv->av.valid) {
3614 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3615 return -EINVAL;
3616 }
3617 if (cm_id_priv->av.ah_attr.vlan_id != 0xffff) {
3618 qp_attr->vlan_id = cm_id_priv->av.ah_attr.vlan_id;
3619 *qp_attr_mask |= IB_QP_VID;
3621 if (!is_zero_ether_addr(cm_id_priv->av.smac)) {
3622 memcpy(qp_attr->smac, cm_id_priv->av.smac,
3623 sizeof(qp_attr->smac));
3624 *qp_attr_mask |= IB_QP_SMAC;
3626 if (cm_id_priv->alt_av.valid) {
3627 if (cm_id_priv->alt_av.ah_attr.vlan_id != 0xffff) {
3628 qp_attr->alt_vlan_id =
3629 cm_id_priv->alt_av.ah_attr.vlan_id;
3630 *qp_attr_mask |= IB_QP_ALT_VID;
3632 if (!is_zero_ether_addr(cm_id_priv->alt_av.smac)) {
3633 memcpy(qp_attr->alt_smac,
3634 cm_id_priv->alt_av.smac,
3635 sizeof(qp_attr->alt_smac));
3636 *qp_attr_mask |= IB_QP_ALT_SMAC;
3639 qp_attr->path_mtu = cm_id_priv->path_mtu;
3640 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3641 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3642 if (cm_id_priv->qp_type == IB_QPT_RC ||
3643 cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
3644 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
3645 IB_QP_MIN_RNR_TIMER;
3646 qp_attr->max_dest_rd_atomic =
3647 cm_id_priv->responder_resources;
3648 qp_attr->min_rnr_timer = 0;
3650 if (cm_id_priv->alt_av.ah_attr.dlid) {
3651 *qp_attr_mask |= IB_QP_ALT_PATH;
3652 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3653 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3654 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
3655 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3663 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3667 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3668 struct ib_qp_attr *qp_attr,
3671 unsigned long flags;
3674 spin_lock_irqsave(&cm_id_priv->lock, flags);
3675 switch (cm_id_priv->id.state) {
3676 /* Allow transition to RTS before sending REP */
3677 case IB_CM_REQ_RCVD:
3678 case IB_CM_MRA_REQ_SENT:
3680 case IB_CM_REP_RCVD:
3681 case IB_CM_MRA_REP_SENT:
3682 case IB_CM_REP_SENT:
3683 case IB_CM_MRA_REP_RCVD:
3684 case IB_CM_ESTABLISHED:
3685 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
3686 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
3687 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3688 switch (cm_id_priv->qp_type) {
3690 case IB_QPT_XRC_INI:
3691 *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
3692 IB_QP_MAX_QP_RD_ATOMIC;
3693 qp_attr->retry_cnt = cm_id_priv->retry_count;
3694 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3695 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
3697 case IB_QPT_XRC_TGT:
3698 *qp_attr_mask |= IB_QP_TIMEOUT;
3699 qp_attr->timeout = cm_id_priv->av.timeout;
3704 if (cm_id_priv->alt_av.ah_attr.dlid) {
3705 *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3706 qp_attr->path_mig_state = IB_MIG_REARM;
3709 *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
3710 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3711 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3712 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
3713 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3714 qp_attr->path_mig_state = IB_MIG_REARM;
3722 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3726 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3727 struct ib_qp_attr *qp_attr,
3730 struct cm_id_private *cm_id_priv;
3733 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3734 switch (qp_attr->qp_state) {
3736 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
3739 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
3742 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
3750 EXPORT_SYMBOL(ib_cm_init_qp_attr);
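/*
 * Usage sketch (illustrative only, compiled out): driving a connected
 * QP through INIT -> RTR -> RTS using attributes filled in by the CM.
 * Consumers normally do this in response to connection events; the
 * helper name is hypothetical.
 */
#if 0
static int example_modify_qp(struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	static const enum ib_qp_state states[] = {
		IB_QPS_INIT, IB_QPS_RTR, IB_QPS_RTS
	};
	struct ib_qp_attr attr;
	int i, mask, ret;

	for (i = 0; i < ARRAY_SIZE(states); i++) {
		memset(&attr, 0, sizeof attr);
		attr.qp_state = states[i];
		ret = ib_cm_init_qp_attr(cm_id, &attr, &mask);
		if (!ret)
			ret = ib_modify_qp(qp, &attr, mask);
		if (ret)
			return ret;
	}
	return 0;
}
#endif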
3752 static void cm_get_ack_delay(struct cm_device *cm_dev)
3754 struct ib_device_attr attr;
3756 if (ib_query_device(cm_dev->ib_device, &attr))
3757 cm_dev->ack_delay = 0; /* acks will rely on packet life time */
3758 else
3759 cm_dev->ack_delay = attr.local_ca_ack_delay;
3762 static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
3765 struct cm_counter_group *group;
3766 struct cm_counter_attribute *cm_attr;
3768 group = container_of(obj, struct cm_counter_group, obj);
3769 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
3771 return sprintf(buf, "%ld\n",
3772 atomic_long_read(&group->counter[cm_attr->index]));
3775 static const struct sysfs_ops cm_counter_ops = {
3776 .show = cm_show_counter
3779 static struct kobj_type cm_counter_obj_type = {
3780 .sysfs_ops = &cm_counter_ops,
3781 .default_attrs = cm_counter_default_attrs
3784 static void cm_release_port_obj(struct kobject *obj)
3786 struct cm_port *cm_port;
3788 cm_port = container_of(obj, struct cm_port, port_obj);
3792 static struct kobj_type cm_port_obj_type = {
3793 .release = cm_release_port_obj
3796 static char *cm_devnode(struct device *dev, umode_t *mode)
3798 if (mode)
3799 *mode = 0666;
3800 return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
3803 struct class cm_class = {
3804 .owner = THIS_MODULE,
3805 .name = "infiniband_cm",
3806 .devnode = cm_devnode,
3808 EXPORT_SYMBOL(cm_class);
3810 static int cm_create_port_fs(struct cm_port *port)
3814 ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
3815 &port->cm_dev->device->kobj,
3816 "%d", port->port_num);
3822 for (i = 0; i < CM_COUNTER_GROUPS; i++) {
3823 ret = kobject_init_and_add(&port->counter_group[i].obj,
3824 &cm_counter_obj_type,
3826 "%s", counter_group_names[i]);
3835 kobject_put(&port->counter_group[i].obj);
3836 kobject_put(&port->port_obj);
3841 static void cm_remove_port_fs(struct cm_port *port)
3845 for (i = 0; i < CM_COUNTER_GROUPS; i++)
3846 kobject_put(&port->counter_group[i].obj);
3848 kobject_put(&port->port_obj);
3851 static void cm_add_one(struct ib_device *ib_device)
3853 struct cm_device *cm_dev;
3854 struct cm_port *port;
3855 struct ib_mad_reg_req reg_req = {
3856 .mgmt_class = IB_MGMT_CLASS_CM,
3857 .mgmt_class_version = IB_CM_CLASS_VERSION,
3859 struct ib_port_modify port_modify = {
3860 .set_port_cap_mask = IB_PORT_CM_SUP
3862 unsigned long flags;
3867 cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
3868 ib_device->phys_port_cnt, GFP_KERNEL);
3869 if (!cm_dev)
3870 return;
3872 cm_dev->ib_device = ib_device;
3873 cm_get_ack_delay(cm_dev);
3874 cm_dev->going_down = 0;
3875 cm_dev->device = device_create(&cm_class, &ib_device->dev,
3877 "%s", ib_device->name);
3878 if (IS_ERR(cm_dev->device)) {
3883 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
3884 for (i = 1; i <= ib_device->phys_port_cnt; i++) {
3885 if (!rdma_cap_ib_cm(ib_device, i))
3888 port = kzalloc(sizeof *port, GFP_KERNEL);
3892 cm_dev->port[i-1] = port;
3893 port->cm_dev = cm_dev;
3896 ret = cm_create_port_fs(port);
3900 port->mad_agent = ib_register_mad_agent(ib_device, i,
3908 if (IS_ERR(port->mad_agent))
3911 ret = ib_modify_port(ib_device, i, 0, &port_modify);
3921 ib_set_client_data(ib_device, &cm_client, cm_dev);
3923 write_lock_irqsave(&cm.device_lock, flags);
3924 list_add_tail(&cm_dev->list, &cm.device_list);
3925 write_unlock_irqrestore(&cm.device_lock, flags);
3929 ib_unregister_mad_agent(port->mad_agent);
3931 cm_remove_port_fs(port);
3933 port_modify.set_port_cap_mask = 0;
3934 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
3936 if (!rdma_cap_ib_cm(ib_device, i))
3939 port = cm_dev->port[i-1];
3940 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
3941 ib_unregister_mad_agent(port->mad_agent);
3942 cm_remove_port_fs(port);
3945 device_unregister(cm_dev->device);
3949 static void cm_remove_one(struct ib_device *ib_device, void *client_data)
3951 struct cm_device *cm_dev = client_data;
3952 struct cm_port *port;
3953 struct ib_port_modify port_modify = {
3954 .clr_port_cap_mask = IB_PORT_CM_SUP
3956 unsigned long flags;
3962 write_lock_irqsave(&cm.device_lock, flags);
3963 list_del(&cm_dev->list);
3964 write_unlock_irqrestore(&cm.device_lock, flags);
3966 spin_lock_irq(&cm.lock);
3967 cm_dev->going_down = 1;
3968 spin_unlock_irq(&cm.lock);
3970 for (i = 1; i <= ib_device->phys_port_cnt; i++) {
3971 if (!rdma_cap_ib_cm(ib_device, i))
3974 port = cm_dev->port[i-1];
3975 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
3977 * We flush the queue here after setting going_down; this
3978 * guarantees that no new work will be queued in the recv handler,
3979 * so it is then safe to unregister the MAD agent.
3981 flush_workqueue(cm.wq);
3982 ib_unregister_mad_agent(port->mad_agent);
3983 cm_remove_port_fs(port);
3985 device_unregister(cm_dev->device);
3989 static int __init ib_cm_init(void)
3993 memset(&cm, 0, sizeof cm);
3994 INIT_LIST_HEAD(&cm.device_list);
3995 rwlock_init(&cm.device_lock);
3996 spin_lock_init(&cm.lock);
3997 cm.listen_service_table = RB_ROOT;
3998 cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
3999 cm.remote_id_table = RB_ROOT;
4000 cm.remote_qp_table = RB_ROOT;
4001 cm.remote_sidr_table = RB_ROOT;
4002 idr_init(&cm.local_id_table);
4003 get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
4004 INIT_LIST_HEAD(&cm.timewait_list);
4006 ret = class_register(&cm_class);
4012 cm.wq = create_workqueue("ib_cm");
4018 ret = ib_register_client(&cm_client);
4024 destroy_workqueue(cm.wq);
4026 class_unregister(&cm_class);
4028 idr_destroy(&cm.local_id_table);
4032 static void __exit ib_cm_cleanup(void)
4034 struct cm_timewait_info *timewait_info, *tmp;
4036 spin_lock_irq(&cm.lock);
4037 list_for_each_entry(timewait_info, &cm.timewait_list, list)
4038 cancel_delayed_work(&timewait_info->work.work);
4039 spin_unlock_irq(&cm.lock);
4041 ib_unregister_client(&cm_client);
4042 destroy_workqueue(cm.wq);
4044 list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
4045 list_del(&timewait_info->list);
4046 kfree(timewait_info);
4049 class_unregister(&cm_class);
4050 idr_destroy(&cm.local_id_table);
4053 module_init(ib_cm_init);
4054 module_exit(ib_cm_cleanup);