/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include "sa.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand subnet administration query support");
MODULE_LICENSE("Dual BSD/GPL");
#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;
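
/*
 * Local service support: before a path query is posted to the MAD
 * layer, it may be handed to a userspace resolver over the
 * RDMA_NL_GROUP_LS netlink group.  Pending requests sit on
 * ib_nl_request_list until a response arrives or the timeout expires,
 * at which point the query falls back to a regular SA MAD.  The
 * timeout is clamped to the MIN/MAX bounds above (milliseconds).
 */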
struct ib_sa_sm_ah {
	struct ib_ah        *ah;
	struct kref          ref;
	u16		     pkey_index;
	u8		     src_path_mask;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah  *sm_ah;
	struct work_struct   update_task;
	spinlock_t           ah_lock;
	u8                   port_num;
};

struct ib_sa_device {
	int                     start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[0];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client    *client;
	struct ib_sa_port      *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah     *sm_ah;
	int			id;
	u32			flags;
	struct list_head	list; /* Local svc request list */
	u32			seq; /* Local svc request sequence number */
	unsigned long		timeout; /* Local svc timeout */
	u8			path_use; /* How will the pathrecord be used */
};
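
/*
 * Query flags: IB_SA_ENABLE_LOCAL_SERVICE routes a query through the
 * netlink resolver in send_mad() when a userspace listener is present;
 * IB_SA_CANCEL marks a query still sitting on the netlink request list
 * so that the timeout worker, not the caller, completes it.
 */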
#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002
struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct ib_sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};
static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
					   .len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};
static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;
#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),		\
	.struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field,	\
	.field_name          = "sa_path_rec:" #field
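
/*
 * Each ib_field entry in the tables below ties a structure member (via
 * the *_FIELD() macros) to its position and width in the on-the-wire
 * attribute layout; ib_pack() and ib_unpack() walk these tables to
 * convert between host structures and MAD data.
 */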
static const struct ib_field path_rec_table[] = {
	/* .offset_words, .offset_bits and .size_bits initializers omitted */
	{ PATH_REC_FIELD(service_id) },
	{ PATH_REC_FIELD(dgid) },
	{ PATH_REC_FIELD(sgid) },
	{ PATH_REC_FIELD(dlid) },
	{ PATH_REC_FIELD(slid) },
	{ PATH_REC_FIELD(raw_traffic) },
	{ PATH_REC_FIELD(flow_label) },
	{ PATH_REC_FIELD(hop_limit) },
	{ PATH_REC_FIELD(traffic_class) },
	{ PATH_REC_FIELD(reversible) },
	{ PATH_REC_FIELD(numb_path) },
	{ PATH_REC_FIELD(pkey) },
	{ PATH_REC_FIELD(qos_class) },
	{ PATH_REC_FIELD(sl) },
	{ PATH_REC_FIELD(mtu_selector) },
	{ PATH_REC_FIELD(mtu) },
	{ PATH_REC_FIELD(rate_selector) },
	{ PATH_REC_FIELD(rate) },
	{ PATH_REC_FIELD(packet_life_time_selector) },
	{ PATH_REC_FIELD(packet_life_time) },
	{ PATH_REC_FIELD(preference) },
};
#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
	.field_name          = "sa_mcmember_rec:" #field
static const struct ib_field mcmember_rec_table[] = {
	/* .offset_words, .offset_bits and .size_bits initializers omitted */
	{ MCMEMBER_REC_FIELD(mgid) },
	{ MCMEMBER_REC_FIELD(port_gid) },
	{ MCMEMBER_REC_FIELD(qkey) },
	{ MCMEMBER_REC_FIELD(mlid) },
	{ MCMEMBER_REC_FIELD(mtu_selector) },
	{ MCMEMBER_REC_FIELD(mtu) },
	{ MCMEMBER_REC_FIELD(traffic_class) },
	{ MCMEMBER_REC_FIELD(pkey) },
	{ MCMEMBER_REC_FIELD(rate_selector) },
	{ MCMEMBER_REC_FIELD(rate) },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector) },
	{ MCMEMBER_REC_FIELD(packet_life_time) },
	{ MCMEMBER_REC_FIELD(sl) },
	{ MCMEMBER_REC_FIELD(flow_label) },
	{ MCMEMBER_REC_FIELD(hop_limit) },
	{ MCMEMBER_REC_FIELD(scope) },
	{ MCMEMBER_REC_FIELD(join_state) },
	{ MCMEMBER_REC_FIELD(proxy_join) },
};
#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
	.field_name          = "sa_service_rec:" #field
static const struct ib_field service_rec_table[] = {
	/* .offset_words, .offset_bits and .size_bits initializers omitted */
	{ SERVICE_REC_FIELD(id) },
	{ SERVICE_REC_FIELD(gid) },
	{ SERVICE_REC_FIELD(pkey) },
	{ SERVICE_REC_FIELD(lease) },
	{ SERVICE_REC_FIELD(key) },
	{ SERVICE_REC_FIELD(name) },
	{ SERVICE_REC_FIELD(data8) },
	{ SERVICE_REC_FIELD(data16) },
	{ SERVICE_REC_FIELD(data32) },
	{ SERVICE_REC_FIELD(data64) },
};
#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
	.struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field,	\
	.field_name          = "sa_guidinfo_rec:" #field
static const struct ib_field guidinfo_rec_table[] = {
	/* .offset_words, .offset_bits and .size_bits initializers omitted */
	{ GUIDINFO_REC_FIELD(lid) },
	{ GUIDINFO_REC_FIELD(block_num) },
	{ GUIDINFO_REC_FIELD(res1) },
	{ GUIDINFO_REC_FIELD(res2) },
	{ GUIDINFO_REC_FIELD(guid_info_list) },
};
static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}
static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct ib_sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = (struct rdma_ls_resolve_header *)
		skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	memcpy(header->device_name, query->port->agent->device->name,
	       LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_GMP;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}
static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return len;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}
static int ib_nl_send_msg(struct ib_sa_query *query)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	int ret = 0;
	struct ib_sa_mad *mad;
	int len;

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* Add attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL);
	if (!ret)
		ret = len;
	else
		ret = 0;

	return ret;
}
static int ib_nl_make_request(struct ib_sa_query *query)
{
	unsigned long flags;
	unsigned long delay;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	ret = ib_nl_send_msg(query);
	if (ret <= 0) {
		ret = -EIO;
		goto request_out;
	} else {
		ret = 0;
	}

	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);

request_out:
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return ret;
}
static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout routine take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);
static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct ib_mad_send_wc mad_send_wc;
	struct ib_sa_mad *mad = NULL;
	const struct nlattr *head, *curr;
	struct ib_path_rec_data  *rec;
	int len, rem;
	u32 mask = 0;
	int status = -EIO;

	if (query->callback) {
		head = (const struct nlattr *) nlmsg_data(nlh);
		len = nlmsg_len(nlh);
		switch (query->path_use) {
		case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
			mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
			break;

		case LS_RESOLVE_PATH_USE_ALL:
		case LS_RESOLVE_PATH_USE_GMP:
		default:
			mask = IB_PATH_PRIMARY | IB_PATH_GMP |
				IB_PATH_BIDIRECTIONAL;
			break;
		}
		nla_for_each_attr(curr, head, len, rem) {
			if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
				rec = nla_data(curr);
				/*
				 * Get the first one. In the future, we may
				 * need to get up to 6 pathrecords.
				 */
				if ((rec->flags & mask) == mask) {
					mad = query->mad_buf->mad;
					mad->mad_hdr.method |=
						IB_MGMT_METHOD_RESP;
					memcpy(mad->data, rec->path_rec,
					       sizeof(rec->path_rec));
					status = 0;
					break;
				}
			}
		}
		query->callback(query, status, mad);
	}

	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}
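
/*
 * Delayed-work handler for ib_nl_timed_work: expire requests at the
 * head of ib_nl_request_list, disable further local service resolution
 * for each, and either complete cancelled queries with a flush error
 * or fall back to posting the original MAD to the SA.
 */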
static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}
static int ib_nl_handle_set_timeout(struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	}

settimeout_out:
	return skb->len;
}
static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	if (ret)
		return 0;

	return 1;
}
static int ib_nl_handle_resolve_resp(struct sk_buff *skb,
				     struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	unsigned long flags;
	struct ib_sa_query *query;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int found = 0;
	int ret;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(query, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == query->seq) {
			found = !ib_sa_query_cancelled(query);
			if (found)
				list_del(&query->list);
			break;
		}
	}

	if (!found) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return skb->len;
}
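
/*
 * Netlink operations for the RDMA_NL_LS client: userspace answers
 * RESOLVE requests and may adjust the local service timeout.
 */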
static struct ibnl_client_cbs ib_sa_cb_table[] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.dump = ib_nl_handle_resolve_resp,
		.module = THIS_MODULE },
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.dump = ib_nl_handle_set_timeout,
		.module = THIS_MODULE },
};
static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	ib_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}
static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct ib_ah_attr   ah_attr;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		printk(KERN_WARNING "Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
	if (!new_ah) {
		printk(KERN_WARNING "Couldn't allocate new SM AH\n");
		return;
	}

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		printk(KERN_ERR "Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = port_attr.sm_lid;
	ah_attr.sl       = port_attr.sm_sl;
	ah_attr.port_num = port->port_num;

	new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		printk(KERN_WARNING "Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}
static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		struct ib_sa_port *port =
			&sa_dev->port[event->element.port_num - sa_dev->start_port];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		queue_work(ib_wq, &sa_dev->port[event->element.port_num -
						sa_dev->start_port].update_task);
	}
}
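
/*
 * An ib_sa_client is reference counted: every outstanding query takes
 * a reference that is dropped when the query completes, and
 * ib_sa_unregister_client() below blocks until the last reference is
 * gone.
 */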
void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);
/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query.  If the id and query don't match up or
 * the query has already completed, nothing is done.  Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);
static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port   *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}
int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
			 struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	int force_grh;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->dlid);
	ah_attr->sl = rec->sl;
	ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
				 get_src_path_mask(device, port_num);
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	force_grh = rdma_cap_eth_ah(device, port_num);

	if (rec->hop_limit > 1 || force_grh) {
		struct net_device *ndev = ib_get_ndev_from_path(rec);

		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = rec->dgid;

		ret = ib_find_cached_gid(device, &rec->sgid, ndev, &port_num,
					 &gid_index);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}

		ah_attr->grh.sgid_index    = gid_index;
		ah_attr->grh.flow_label    = be32_to_cpu(rec->flow_label);
		ah_attr->grh.hop_limit     = rec->hop_limit;
		ah_attr->grh.traffic_class = rec->traffic_class;
		if (ndev)
			dev_put(ndev);
	}
	if (force_grh) {
		memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    IB_MGMT_BASE_VERSION);
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}
static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
	mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid           =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}
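
/*
 * Allocate an idr id for the query, then either hand it to the
 * netlink resolver (if local service is enabled and a userspace
 * listener exists) or post the MAD directly.  On success the returned
 * id can later be passed to ib_sa_cancel_query().
 */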
static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret, id;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&idr_lock, flags);

	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&idr_lock, flags);
	if (preload)
		idr_preload_end();
	if (id < 0)
		return id;

	query->mad_buf->timeout_ms  = timeout_ms;
	query->mad_buf->context[0] = query;
	query->id = id;

	if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
		if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}
void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);
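
/*
 * A minimal sketch of the pack/unpack round trip (illustrative only;
 * "attr" stands for the IB_MGMT_SA_DATA-sized attribute buffer of an
 * SA MAD):
 *
 *	u8 attr[IB_MGMT_SA_DATA];
 *	struct ib_sa_path_rec rec;
 *
 *	ib_sa_pack_path(&rec, attr);	// host struct -> wire format
 *	ib_sa_unpack_path(attr, &rec);	// wire format -> host struct
 */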
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct ib_sa_path_rec rec;

		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  mad->data, &rec);
		rec.net = NULL;
		rec.ifindex = 0;
		memset(rec.dmac, 0, ETH_ALEN);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}
/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path.  The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code.  Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u8 port_num,
		       struct ib_sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct ib_sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release  = ib_sa_path_rec_release;
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
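
/*
 * A minimal usage sketch (illustrative only: my_sa_client, my_callback
 * and my_ctx are hypothetical).  A caller registers a client once,
 * fills in a path record and component mask, and gets completion
 * through the callback; a non-negative return is the query id, which
 * may later be passed to ib_sa_cancel_query():
 *
 *	static void my_callback(int status, struct ib_sa_path_rec *resp,
 *				void *context)
 *	{
 *		if (!status)
 *			pr_info("dlid 0x%x\n", be16_to_cpu(resp->dlid));
 *	}
 *
 *	ib_sa_register_client(&my_sa_client);
 *	id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
 *				1000, GFP_KERNEL, my_callback, my_ctx,
 *				&sa_query);
 */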
static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				       int status,
				       struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}
/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client:SA client
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code.  Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release  = ib_sa_service_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}
int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release  = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}
int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u8 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      int timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release  = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	free_mad(query);
	ib_sa_client_put(query->client);
	query->release(query);
}
static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;
	struct ib_mad_send_buf *mad_buf;

	mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id;
	query = mad_buf->context[0];

	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}
static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah    = NULL;
		sa_dev->port[i].port_num = i + s;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return;
}
static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);

	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}
static int __init ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		printk(KERN_ERR "Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		printk(KERN_ERR "Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = create_singlethread_workqueue("ib_nl_sa_wq");
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	if (ibnl_add_client(RDMA_NL_LS, RDMA_NL_LS_NUM_OPS,
			    ib_sa_cb_table)) {
		pr_err("Failed to add netlink callback\n");
		ret = -EINVAL;
		goto err4;
	}
	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err4:
	destroy_workqueue(ib_nl_wq);
err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}
static void __exit ib_sa_cleanup(void)
{
	ibnl_remove_client(RDMA_NL_LS);
	cancel_delayed_work(&ib_nl_timed_work);
	flush_workqueue(ib_nl_wq);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}

module_init(ib_sa_init);
module_exit(ib_sa_cleanup);