/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>

#include "core_priv.h"
#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
#define IB_SA_CPI_MAX_RETRY_CNT			3
#define IB_SA_CPI_RETRY_WAIT			1000 /* msecs */

static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;
enum rdma_class_port_info_type {
	RDMA_CLASS_PORT_INFO_IB,
	RDMA_CLASS_PORT_INFO_OPA
};

struct rdma_class_port_info {
	enum rdma_class_port_info_type type;
	union {
		struct ib_class_port_info ib;
		struct opa_class_port_info opa;
	};
};

struct ib_sa_classport_cache {
	bool valid;
	int retry_cnt;
	struct rdma_class_port_info data;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah  *sm_ah;
	struct work_struct   update_task;
	struct ib_sa_classport_cache classport_info;
	struct delayed_work ib_cpi_work;
	spinlock_t	     classport_lock; /* protects class port info set */
	spinlock_t	     ah_lock;
	u8		     port_num;
};
struct ib_sa_device {
	int			start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port	port[0];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client    *client;
	struct ib_sa_port      *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah     *sm_ah;
	int			id;
	u32			flags;
	struct list_head	list; /* Local svc request list */
	u32			seq; /* Local svc request sequence number */
	unsigned long		timeout; /* Local svc timeout */
	u8			path_use; /* How will the pathrecord be used */
};

#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002
#define IB_SA_QUERY_OPA			0x00000004
struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
	struct sa_path_rec *conv_pr;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_classport_info_query {
	void (*callback)(void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};
static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
					   .len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};
static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;
191 #define PATH_REC_FIELD(field) \
192 .struct_offset_bytes = offsetof(struct sa_path_rec, field), \
193 .struct_size_bytes = sizeof((struct sa_path_rec *)0)->field, \
194 .field_name = "sa_path_rec:" #field
196 static const struct ib_field path_rec_table[] = {
197 { PATH_REC_FIELD(ib.service_id),
201 { PATH_REC_FIELD(dgid),
205 { PATH_REC_FIELD(sgid),
209 { PATH_REC_FIELD(ib.dlid),
213 { PATH_REC_FIELD(ib.slid),
217 { PATH_REC_FIELD(ib.raw_traffic),
225 { PATH_REC_FIELD(flow_label),
229 { PATH_REC_FIELD(hop_limit),
233 { PATH_REC_FIELD(traffic_class),
237 { PATH_REC_FIELD(reversible),
241 { PATH_REC_FIELD(numb_path),
245 { PATH_REC_FIELD(pkey),
249 { PATH_REC_FIELD(qos_class),
253 { PATH_REC_FIELD(sl),
257 { PATH_REC_FIELD(mtu_selector),
261 { PATH_REC_FIELD(mtu),
265 { PATH_REC_FIELD(rate_selector),
269 { PATH_REC_FIELD(rate),
273 { PATH_REC_FIELD(packet_life_time_selector),
277 { PATH_REC_FIELD(packet_life_time),
281 { PATH_REC_FIELD(preference),
291 #define OPA_PATH_REC_FIELD(field) \
292 .struct_offset_bytes = \
293 offsetof(struct sa_path_rec, field), \
294 .struct_size_bytes = \
295 sizeof((struct sa_path_rec *)0)->field, \
296 .field_name = "sa_path_rec:" #field
298 static const struct ib_field opa_path_rec_table[] = {
299 { OPA_PATH_REC_FIELD(opa.service_id),
303 { OPA_PATH_REC_FIELD(dgid),
307 { OPA_PATH_REC_FIELD(sgid),
311 { OPA_PATH_REC_FIELD(opa.dlid),
315 { OPA_PATH_REC_FIELD(opa.slid),
319 { OPA_PATH_REC_FIELD(opa.raw_traffic),
327 { OPA_PATH_REC_FIELD(flow_label),
331 { OPA_PATH_REC_FIELD(hop_limit),
335 { OPA_PATH_REC_FIELD(traffic_class),
339 { OPA_PATH_REC_FIELD(reversible),
343 { OPA_PATH_REC_FIELD(numb_path),
347 { OPA_PATH_REC_FIELD(pkey),
351 { OPA_PATH_REC_FIELD(opa.l2_8B),
355 { OPA_PATH_REC_FIELD(opa.l2_10B),
359 { OPA_PATH_REC_FIELD(opa.l2_9B),
363 { OPA_PATH_REC_FIELD(opa.l2_16B),
371 { OPA_PATH_REC_FIELD(opa.qos_type),
375 { OPA_PATH_REC_FIELD(opa.qos_priority),
383 { OPA_PATH_REC_FIELD(sl),
391 { OPA_PATH_REC_FIELD(mtu_selector),
395 { OPA_PATH_REC_FIELD(mtu),
399 { OPA_PATH_REC_FIELD(rate_selector),
403 { OPA_PATH_REC_FIELD(rate),
407 { OPA_PATH_REC_FIELD(packet_life_time_selector),
411 { OPA_PATH_REC_FIELD(packet_life_time),
415 { OPA_PATH_REC_FIELD(preference),
421 #define MCMEMBER_REC_FIELD(field) \
422 .struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \
423 .struct_size_bytes = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \
424 .field_name = "sa_mcmember_rec:" #field
426 static const struct ib_field mcmember_rec_table[] = {
427 { MCMEMBER_REC_FIELD(mgid),
431 { MCMEMBER_REC_FIELD(port_gid),
435 { MCMEMBER_REC_FIELD(qkey),
439 { MCMEMBER_REC_FIELD(mlid),
443 { MCMEMBER_REC_FIELD(mtu_selector),
447 { MCMEMBER_REC_FIELD(mtu),
451 { MCMEMBER_REC_FIELD(traffic_class),
455 { MCMEMBER_REC_FIELD(pkey),
459 { MCMEMBER_REC_FIELD(rate_selector),
463 { MCMEMBER_REC_FIELD(rate),
467 { MCMEMBER_REC_FIELD(packet_life_time_selector),
471 { MCMEMBER_REC_FIELD(packet_life_time),
475 { MCMEMBER_REC_FIELD(sl),
479 { MCMEMBER_REC_FIELD(flow_label),
483 { MCMEMBER_REC_FIELD(hop_limit),
487 { MCMEMBER_REC_FIELD(scope),
491 { MCMEMBER_REC_FIELD(join_state),
495 { MCMEMBER_REC_FIELD(proxy_join),
505 #define SERVICE_REC_FIELD(field) \
506 .struct_offset_bytes = offsetof(struct ib_sa_service_rec, field), \
507 .struct_size_bytes = sizeof ((struct ib_sa_service_rec *) 0)->field, \
508 .field_name = "sa_service_rec:" #field
510 static const struct ib_field service_rec_table[] = {
511 { SERVICE_REC_FIELD(id),
515 { SERVICE_REC_FIELD(gid),
519 { SERVICE_REC_FIELD(pkey),
523 { SERVICE_REC_FIELD(lease),
527 { SERVICE_REC_FIELD(key),
531 { SERVICE_REC_FIELD(name),
535 { SERVICE_REC_FIELD(data8),
539 { SERVICE_REC_FIELD(data16),
543 { SERVICE_REC_FIELD(data32),
547 { SERVICE_REC_FIELD(data64),
553 #define CLASSPORTINFO_REC_FIELD(field) \
554 .struct_offset_bytes = offsetof(struct ib_class_port_info, field), \
555 .struct_size_bytes = sizeof((struct ib_class_port_info *)0)->field, \
556 .field_name = "ib_class_port_info:" #field
558 static const struct ib_field ib_classport_info_rec_table[] = {
559 { CLASSPORTINFO_REC_FIELD(base_version),
563 { CLASSPORTINFO_REC_FIELD(class_version),
567 { CLASSPORTINFO_REC_FIELD(capability_mask),
571 { CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
575 { CLASSPORTINFO_REC_FIELD(redirect_gid),
579 { CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
583 { CLASSPORTINFO_REC_FIELD(redirect_lid),
587 { CLASSPORTINFO_REC_FIELD(redirect_pkey),
592 { CLASSPORTINFO_REC_FIELD(redirect_qp),
596 { CLASSPORTINFO_REC_FIELD(redirect_qkey),
601 { CLASSPORTINFO_REC_FIELD(trap_gid),
605 { CLASSPORTINFO_REC_FIELD(trap_tcslfl),
610 { CLASSPORTINFO_REC_FIELD(trap_lid),
614 { CLASSPORTINFO_REC_FIELD(trap_pkey),
619 { CLASSPORTINFO_REC_FIELD(trap_hlqp),
623 { CLASSPORTINFO_REC_FIELD(trap_qkey),
629 #define OPA_CLASSPORTINFO_REC_FIELD(field) \
630 .struct_offset_bytes =\
631 offsetof(struct opa_class_port_info, field), \
632 .struct_size_bytes = \
633 sizeof((struct opa_class_port_info *)0)->field, \
634 .field_name = "opa_class_port_info:" #field
636 static const struct ib_field opa_classport_info_rec_table[] = {
637 { OPA_CLASSPORTINFO_REC_FIELD(base_version),
641 { OPA_CLASSPORTINFO_REC_FIELD(class_version),
645 { OPA_CLASSPORTINFO_REC_FIELD(cap_mask),
649 { OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
653 { OPA_CLASSPORTINFO_REC_FIELD(redirect_gid),
657 { OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl),
661 { OPA_CLASSPORTINFO_REC_FIELD(redirect_lid),
665 { OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp),
669 { OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey),
673 { OPA_CLASSPORTINFO_REC_FIELD(trap_gid),
677 { OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl),
681 { OPA_CLASSPORTINFO_REC_FIELD(trap_lid),
685 { OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp),
689 { OPA_CLASSPORTINFO_REC_FIELD(trap_qkey),
693 { OPA_CLASSPORTINFO_REC_FIELD(trap_pkey),
697 { OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey),
701 { OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd),
711 #define GUIDINFO_REC_FIELD(field) \
712 .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \
713 .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \
714 .field_name = "sa_guidinfo_rec:" #field
716 static const struct ib_field guidinfo_rec_table[] = {
717 { GUIDINFO_REC_FIELD(lid),
721 { GUIDINFO_REC_FIELD(block_num),
725 { GUIDINFO_REC_FIELD(res1),
729 { GUIDINFO_REC_FIELD(res2),
733 { GUIDINFO_REC_FIELD(guid_info_list),
739 static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
741 query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
744 static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
746 return (query->flags & IB_SA_CANCEL);
749 static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
750 struct ib_sa_query *query)
752 struct sa_path_rec *sa_rec = query->mad_buf->context[1];
753 struct ib_sa_mad *mad = query->mad_buf->mad;
754 ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
757 struct rdma_ls_resolve_header *header;
759 query->mad_buf->context[1] = NULL;
761 /* Construct the family header first */
762 header = (struct rdma_ls_resolve_header *)
763 skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
764 memcpy(header->device_name, query->port->agent->device->name,
766 header->port_num = query->port->port_num;
	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_GMP;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
773 header->path_use = query->path_use;
775 /* Now build the attributes */
776 if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
777 val64 = be64_to_cpu(sa_path_get_service_id(sa_rec));
778 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
779 sizeof(val64), &val64);
781 if (comp_mask & IB_SA_PATH_REC_DGID)
782 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
783 sizeof(sa_rec->dgid), &sa_rec->dgid);
784 if (comp_mask & IB_SA_PATH_REC_SGID)
785 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
786 sizeof(sa_rec->sgid), &sa_rec->sgid);
787 if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
788 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
789 sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);
791 if (comp_mask & IB_SA_PATH_REC_PKEY) {
792 val16 = be16_to_cpu(sa_rec->pkey);
793 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
794 sizeof(val16), &val16);
796 if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
797 val16 = be16_to_cpu(sa_rec->qos_class);
798 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
799 sizeof(val16), &val16);
803 static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
807 if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
808 len += nla_total_size(sizeof(u64));
809 if (comp_mask & IB_SA_PATH_REC_DGID)
810 len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
811 if (comp_mask & IB_SA_PATH_REC_SGID)
812 len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
813 if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
814 len += nla_total_size(sizeof(u8));
815 if (comp_mask & IB_SA_PATH_REC_PKEY)
816 len += nla_total_size(sizeof(u16));
817 if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
818 len += nla_total_size(sizeof(u16));
	/* Make sure that at least some of the required comp_mask bits are set */
	if (WARN_ON(len == 0))
		return -EINVAL;
	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}
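/*
 * Editorial note (not part of the original source): for a typical query
 * that sets DGID, SGID and PKEY in comp_mask, the length computed above
 * works out to
 *
 *     2 * nla_total_size(sizeof(struct rdma_nla_ls_gid))   (DGID + SGID)
 *   +     nla_total_size(sizeof(u16))                      (PKEY)
 *   + NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header))   (family header)
 *
 * and is then handed to nlmsg_new() by ib_nl_send_msg() below.
 */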
833 static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
835 struct sk_buff *skb = NULL;
836 struct nlmsghdr *nlh;
839 struct ib_sa_mad *mad;
842 mad = query->mad_buf->mad;
843 len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
847 skb = nlmsg_new(len, gfp_mask);
851 /* Put nlmsg header only for now */
852 data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
853 RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
860 ib_nl_set_path_rec_attrs(skb, query);
862 /* Repair the nlmsg header length */
865 ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
874 static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
880 INIT_LIST_HEAD(&query->list);
881 query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
883 /* Put the request on the list first.*/
884 spin_lock_irqsave(&ib_nl_request_lock, flags);
885 delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
886 query->timeout = delay + jiffies;
887 list_add_tail(&query->list, &ib_nl_request_list);
888 /* Start the timeout if this is the only request */
889 if (ib_nl_request_list.next == &query->list)
890 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
891 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
893 ret = ib_nl_send_msg(query, gfp_mask);
896 /* Remove the request */
897 spin_lock_irqsave(&ib_nl_request_lock, flags);
898 list_del(&query->list);
899 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
907 static int ib_nl_cancel_request(struct ib_sa_query *query)
910 struct ib_sa_query *wait_query;
913 spin_lock_irqsave(&ib_nl_request_lock, flags);
914 list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout routine take care of the callback */
916 if (query == wait_query) {
917 query->flags |= IB_SA_CANCEL;
918 query->timeout = jiffies;
919 list_move(&query->list, &ib_nl_request_list);
921 mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
925 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
930 static void send_handler(struct ib_mad_agent *agent,
931 struct ib_mad_send_wc *mad_send_wc);
933 static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
934 const struct nlmsghdr *nlh)
936 struct ib_mad_send_wc mad_send_wc;
937 struct ib_sa_mad *mad = NULL;
938 const struct nlattr *head, *curr;
939 struct ib_path_rec_data *rec;
944 if (query->callback) {
945 head = (const struct nlattr *) nlmsg_data(nlh);
946 len = nlmsg_len(nlh);
947 switch (query->path_use) {
948 case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
949 mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
952 case LS_RESOLVE_PATH_USE_ALL:
953 case LS_RESOLVE_PATH_USE_GMP:
955 mask = IB_PATH_PRIMARY | IB_PATH_GMP |
956 IB_PATH_BIDIRECTIONAL;
959 nla_for_each_attr(curr, head, len, rem) {
960 if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
961 rec = nla_data(curr);
963 * Get the first one. In the future, we may
964 * need to get up to 6 pathrecords.
966 if ((rec->flags & mask) == mask) {
967 mad = query->mad_buf->mad;
968 mad->mad_hdr.method |=
970 memcpy(mad->data, rec->path_rec,
971 sizeof(rec->path_rec));
977 query->callback(query, status, mad);
980 mad_send_wc.send_buf = query->mad_buf;
981 mad_send_wc.status = IB_WC_SUCCESS;
982 send_handler(query->mad_buf->mad_agent, &mad_send_wc);
985 static void ib_nl_request_timeout(struct work_struct *work)
988 struct ib_sa_query *query;
990 struct ib_mad_send_wc mad_send_wc;
993 spin_lock_irqsave(&ib_nl_request_lock, flags);
994 while (!list_empty(&ib_nl_request_list)) {
995 query = list_entry(ib_nl_request_list.next,
996 struct ib_sa_query, list);
		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}
1006 list_del(&query->list);
1007 ib_sa_disable_local_svc(query);
1008 /* Hold the lock to protect against query cancellation */
1009 if (ib_sa_query_cancelled(query))
1012 ret = ib_post_send_mad(query->mad_buf, NULL);
1014 mad_send_wc.send_buf = query->mad_buf;
1015 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
1016 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1017 send_handler(query->port->agent, &mad_send_wc);
1018 spin_lock_irqsave(&ib_nl_request_lock, flags);
1021 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1024 int ib_nl_handle_set_timeout(struct sk_buff *skb,
1025 struct netlink_callback *cb)
1027 const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
1028 int timeout, delta, abs_delta;
1029 const struct nlattr *attr;
1030 unsigned long flags;
1031 struct ib_sa_query *query;
1033 struct nlattr *tb[LS_NLA_TYPE_MAX];
1036 if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
1037 !(NETLINK_CB(skb).sk) ||
1038 !netlink_capable(skb, CAP_NET_ADMIN))
1041 ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
1042 nlmsg_len(nlh), ib_nl_policy, NULL);
1043 attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
1045 goto settimeout_out;
1047 timeout = *(int *) nla_data(attr);
1048 if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
1049 timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
1050 if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
1051 timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;
1053 delta = timeout - sa_local_svc_timeout_ms;
1060 spin_lock_irqsave(&ib_nl_request_lock, flags);
1061 sa_local_svc_timeout_ms = timeout;
1062 list_for_each_entry(query, &ib_nl_request_list, list) {
		if (delta < 0 && abs_delta > query->timeout)
			query->timeout = 0;
		else
			query->timeout += delta;
1068 /* Get the new delay from the first entry */
1070 delay = query->timeout - jiffies;
1076 mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
1077 (unsigned long)delay);
1078 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1085 static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
1087 struct nlattr *tb[LS_NLA_TYPE_MAX];
1090 if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
1093 ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
1094 nlmsg_len(nlh), ib_nl_policy, NULL);
1101 int ib_nl_handle_resolve_resp(struct sk_buff *skb,
1102 struct netlink_callback *cb)
1104 const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
1105 unsigned long flags;
1106 struct ib_sa_query *query;
1107 struct ib_mad_send_buf *send_buf;
1108 struct ib_mad_send_wc mad_send_wc;
1112 if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
1113 !(NETLINK_CB(skb).sk) ||
1114 !netlink_capable(skb, CAP_NET_ADMIN))
1117 spin_lock_irqsave(&ib_nl_request_lock, flags);
1118 list_for_each_entry(query, &ib_nl_request_list, list) {
1120 * If the query is cancelled, let the timeout routine
1123 if (nlh->nlmsg_seq == query->seq) {
1124 found = !ib_sa_query_cancelled(query);
1126 list_del(&query->list);
1132 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1136 send_buf = query->mad_buf;
1138 if (!ib_nl_is_good_resolve_resp(nlh)) {
1139 /* if the result is a failure, send out the packet via IB */
1140 ib_sa_disable_local_svc(query);
1141 ret = ib_post_send_mad(query->mad_buf, NULL);
1142 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1144 mad_send_wc.send_buf = send_buf;
1145 mad_send_wc.status = IB_WC_GENERAL_ERR;
1146 send_handler(query->port->agent, &mad_send_wc);
1149 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1150 ib_nl_process_good_resolve_rsp(query, nlh);
1157 static void free_sm_ah(struct kref *kref)
1159 struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);
1161 rdma_destroy_ah(sm_ah->ah);
1165 void ib_sa_register_client(struct ib_sa_client *client)
1167 atomic_set(&client->users, 1);
1168 init_completion(&client->comp);
1170 EXPORT_SYMBOL(ib_sa_register_client);
1172 void ib_sa_unregister_client(struct ib_sa_client *client)
1174 ib_sa_client_put(client);
1175 wait_for_completion(&client->comp);
1177 EXPORT_SYMBOL(ib_sa_unregister_client);
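/*
 * Editorial sketch (not part of the original file): typical lifecycle of an
 * ib_sa_client as used by in-kernel consumers.  The structure and function
 * names ("my_sa_client", "my_module_init", "my_module_exit") are hypothetical.
 */
#if 0
static struct ib_sa_client my_sa_client;

static int my_module_init(void)
{
	/* Take the initial reference; each query takes a further reference. */
	ib_sa_register_client(&my_sa_client);
	return 0;
}

static void my_module_exit(void)
{
	/*
	 * Drops the initial reference and waits until every outstanding
	 * query issued with this client has dropped its reference too.
	 */
	ib_sa_unregister_client(&my_sa_client);
}
#endif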
1180 * ib_sa_cancel_query - try to cancel an SA query
1181 * @id:ID of query to cancel
1182 * @query:query pointer to cancel
1184 * Try to cancel an SA query. If the id and query don't match up or
1185 * the query has already completed, nothing is done. Otherwise the
1186 * query is canceled and will complete with a status of -EINTR.
1188 void ib_sa_cancel_query(int id, struct ib_sa_query *query)
1190 unsigned long flags;
1191 struct ib_mad_agent *agent;
1192 struct ib_mad_send_buf *mad_buf;
1194 spin_lock_irqsave(&idr_lock, flags);
1195 if (idr_find(&query_idr, id) != query) {
1196 spin_unlock_irqrestore(&idr_lock, flags);
1199 agent = query->port->agent;
1200 mad_buf = query->mad_buf;
1201 spin_unlock_irqrestore(&idr_lock, flags);
1204 * If the query is still on the netlink request list, schedule
1205 * it to be cancelled by the timeout routine. Otherwise, it has been
1206 * sent to the MAD layer and has to be cancelled from there.
1208 if (!ib_nl_cancel_request(query))
1209 ib_cancel_mad(agent, mad_buf);
1211 EXPORT_SYMBOL(ib_sa_cancel_query);
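/*
 * Editorial sketch (not part of the original file): cancelling an outstanding
 * query.  'id' is the non-negative value returned by one of the *_query()/
 * *_get() helpers and 'sa_query' the context they filled in; the function
 * name is hypothetical.
 */
#if 0
static void my_cancel_example(int id, struct ib_sa_query *sa_query)
{
	if (id >= 0)
		ib_sa_cancel_query(id, sa_query);
	/* The query's callback still runs, with status == -EINTR. */
}
#endif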
1213 static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
1215 struct ib_sa_device *sa_dev;
1216 struct ib_sa_port *port;
1217 unsigned long flags;
1220 sa_dev = ib_get_client_data(device, &sa_client);
1224 port = &sa_dev->port[port_num - sa_dev->start_port];
1225 spin_lock_irqsave(&port->ah_lock, flags);
1226 src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
1227 spin_unlock_irqrestore(&port->ah_lock, flags);
1229 return src_path_mask;
1232 int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
1233 struct sa_path_rec *rec,
1234 struct rdma_ah_attr *ah_attr)
1239 struct net_device *ndev = NULL;
1241 memset(ah_attr, 0, sizeof *ah_attr);
1242 ah_attr->type = rdma_ah_find_type(device, port_num);
1244 rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
1245 rdma_ah_set_sl(ah_attr, rec->sl);
1246 rdma_ah_set_path_bits(ah_attr, be32_to_cpu(sa_path_get_slid(rec)) &
1247 get_src_path_mask(device, port_num));
1248 rdma_ah_set_port_num(ah_attr, port_num);
1249 rdma_ah_set_static_rate(ah_attr, rec->rate);
1250 use_roce = rdma_cap_eth_ah(device, port_num);
1253 struct net_device *idev;
1254 struct net_device *resolved_dev;
1255 struct rdma_dev_addr dev_addr = {
1256 .bound_dev_if = ((sa_path_get_ifindex(rec) >= 0) ?
1257 sa_path_get_ifindex(rec) : 0),
1258 .net = sa_path_get_ndev(rec) ?
1259 sa_path_get_ndev(rec) :
1263 struct sockaddr _sockaddr;
1264 struct sockaddr_in _sockaddr_in;
1265 struct sockaddr_in6 _sockaddr_in6;
1266 } sgid_addr, dgid_addr;
1268 if (!device->get_netdev)
1271 rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
1272 rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);
1274 /* validate the route */
1275 ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
1276 &dgid_addr._sockaddr, &dev_addr);
1280 if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
1281 dev_addr.network == RDMA_NETWORK_IPV6) &&
1282 rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
1285 idev = device->get_netdev(device, port_num);
1289 resolved_dev = dev_get_by_index(dev_addr.net,
1290 dev_addr.bound_dev_if);
1291 if (resolved_dev->flags & IFF_LOOPBACK) {
1292 dev_put(resolved_dev);
1293 resolved_dev = idev;
1294 dev_hold(resolved_dev);
1296 ndev = ib_get_ndev_from_path(rec);
1298 if ((ndev && ndev != resolved_dev) ||
1299 (resolved_dev != idev &&
1300 !rdma_is_upper_dev_rcu(idev, resolved_dev)))
1301 ret = -EHOSTUNREACH;
1304 dev_put(resolved_dev);
1312 if (rec->hop_limit > 0 || use_roce) {
1313 enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);
1315 ret = ib_find_cached_gid_by_port(device, &rec->sgid, type,
1316 port_num, ndev, &gid_index);
1323 rdma_ah_set_grh(ah_attr, &rec->dgid,
1324 be32_to_cpu(rec->flow_label),
1325 gid_index, rec->hop_limit,
1326 rec->traffic_class);
1332 u8 *dmac = sa_path_get_dmac(rec);
1336 memcpy(ah_attr->roce.dmac, dmac, ETH_ALEN);
1341 EXPORT_SYMBOL(ib_init_ah_from_path);
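/*
 * Editorial sketch (not part of the original file): turning a resolved path
 * record into an address handle.  'pd' and 'rec' are assumed to come from the
 * caller and the function name is hypothetical; error handling is abbreviated.
 */
#if 0
static struct ib_ah *my_ah_from_path(struct ib_device *device, u8 port_num,
				     struct sa_path_rec *rec, struct ib_pd *pd)
{
	struct rdma_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_path(device, port_num, rec, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return rdma_create_ah(pd, &ah_attr);
}
#endif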
1343 static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
1345 unsigned long flags;
1347 spin_lock_irqsave(&query->port->ah_lock, flags);
1348 if (!query->port->sm_ah) {
1349 spin_unlock_irqrestore(&query->port->ah_lock, flags);
1352 kref_get(&query->port->sm_ah->ref);
1353 query->sm_ah = query->port->sm_ah;
1354 spin_unlock_irqrestore(&query->port->ah_lock, flags);
1356 query->mad_buf = ib_create_send_mad(query->port->agent, 1,
1357 query->sm_ah->pkey_index,
1358 0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
1360 ((query->flags & IB_SA_QUERY_OPA) ?
1361 OPA_MGMT_BASE_VERSION :
1362 IB_MGMT_BASE_VERSION));
1363 if (IS_ERR(query->mad_buf)) {
1364 kref_put(&query->sm_ah->ref, free_sm_ah);
1368 query->mad_buf->ah = query->sm_ah->ah;
1373 static void free_mad(struct ib_sa_query *query)
1375 ib_free_send_mad(query->mad_buf);
1376 kref_put(&query->sm_ah->ref, free_sm_ah);
1379 static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
1381 struct ib_sa_mad *mad = query->mad_buf->mad;
1382 unsigned long flags;
1384 memset(mad, 0, sizeof *mad);
1386 if (query->flags & IB_SA_QUERY_OPA) {
1387 mad->mad_hdr.base_version = OPA_MGMT_BASE_VERSION;
1388 mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;
1390 mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION;
1391 mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
1393 mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
1394 spin_lock_irqsave(&tid_lock, flags);
1396 cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
1397 spin_unlock_irqrestore(&tid_lock, flags);
1400 static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
1402 bool preload = gfpflags_allow_blocking(gfp_mask);
1403 unsigned long flags;
1407 idr_preload(gfp_mask);
1408 spin_lock_irqsave(&idr_lock, flags);
1410 id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);
1412 spin_unlock_irqrestore(&idr_lock, flags);
1418 query->mad_buf->timeout_ms = timeout_ms;
1419 query->mad_buf->context[0] = query;
1422 if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
1423 (!(query->flags & IB_SA_QUERY_OPA))) {
1424 if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
1425 if (!ib_nl_make_request(query, gfp_mask))
1428 ib_sa_disable_local_svc(query);
1431 ret = ib_post_send_mad(query->mad_buf, NULL);
1433 spin_lock_irqsave(&idr_lock, flags);
1434 idr_remove(&query_idr, id);
1435 spin_unlock_irqrestore(&idr_lock, flags);
1439 * It's not safe to dereference query any more, because the
1440 * send may already have completed and freed the query in
1443 return ret ? ret : id;
1446 void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec)
1448 ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
1450 EXPORT_SYMBOL(ib_sa_unpack_path);
1452 void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
1454 ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
1456 EXPORT_SYMBOL(ib_sa_pack_path);
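/*
 * Editorial sketch (not part of the original file): round-tripping a path
 * record through its on-the-wire MAD representation.  IB_MGMT_SA_DATA is the
 * SA MAD data-area size used elsewhere in this file, so it is large enough to
 * hold the packed record; the function name is hypothetical.
 */
#if 0
static void my_pack_unpack_example(struct sa_path_rec *rec)
{
	struct sa_path_rec copy;
	u8 wire[IB_MGMT_SA_DATA];

	ib_sa_pack_path(rec, wire);	/* host structure -> wire format */
	ib_sa_unpack_path(wire, &copy);	/* wire format -> host structure */
}
#endif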
1458 static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
1459 struct ib_device *device,
1462 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1463 struct ib_sa_port *port;
1464 unsigned long flags;
1470 port = &sa_dev->port[port_num - sa_dev->start_port];
1471 spin_lock_irqsave(&port->classport_lock, flags);
1472 if (!port->classport_info.valid)
1475 if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA)
1476 ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) &
1477 OPA_CLASS_PORT_INFO_PR_SUPPORT;
1479 spin_unlock_irqrestore(&port->classport_lock, flags);
1483 enum opa_pr_supported {
1490 * Check if current PR query can be an OPA query.
1491 * Retuns PR_NOT_SUPPORTED if a path record query is not
1492 * possible, PR_OPA_SUPPORTED if an OPA path record query
1493 * is possible and PR_IB_SUPPORTED if an IB path record
1494 * query is possible.
1496 static int opa_pr_query_possible(struct ib_sa_client *client,
1497 struct ib_device *device,
1499 struct sa_path_rec *rec)
1501 struct ib_port_attr port_attr;
1503 if (ib_query_port(device, port_num, &port_attr))
1504 return PR_NOT_SUPPORTED;
1506 if (ib_sa_opa_pathrecord_support(client, device, port_num))
1507 return PR_OPA_SUPPORTED;
1509 if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
1510 return PR_NOT_SUPPORTED;
1512 return PR_IB_SUPPORTED;
1515 static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
1517 struct ib_sa_mad *mad)
1519 struct ib_sa_path_query *query =
1520 container_of(sa_query, struct ib_sa_path_query, sa_query);
1523 struct sa_path_rec rec;
1525 if (sa_query->flags & IB_SA_QUERY_OPA) {
1526 ib_unpack(opa_path_rec_table,
1527 ARRAY_SIZE(opa_path_rec_table),
1529 rec.rec_type = SA_PATH_REC_TYPE_OPA;
1530 query->callback(status, &rec, query->context);
1532 ib_unpack(path_rec_table,
1533 ARRAY_SIZE(path_rec_table),
1535 rec.rec_type = SA_PATH_REC_TYPE_IB;
1536 sa_path_set_ndev(&rec, NULL);
1537 sa_path_set_ifindex(&rec, 0);
1538 sa_path_set_dmac_zero(&rec);
1540 if (query->conv_pr) {
1541 struct sa_path_rec opa;
1543 memset(&opa, 0, sizeof(struct sa_path_rec));
1544 sa_convert_path_ib_to_opa(&opa, &rec);
1545 query->callback(status, &opa, query->context);
1547 query->callback(status, &rec, query->context);
1551 query->callback(status, NULL, query->context);
1554 static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
1556 struct ib_sa_path_query *query =
1557 container_of(sa_query, struct ib_sa_path_query, sa_query);
1559 kfree(query->conv_pr);
1564 * ib_sa_path_rec_get - Start a Path get query
1566 * @device:device to send query on
1567 * @port_num: port number to send query on
1568 * @rec:Path Record to send in query
1569 * @comp_mask:component mask to send in query
1570 * @timeout_ms:time to wait for response
1571 * @gfp_mask:GFP mask to use for internal allocations
1572 * @callback:function called when query completes, times out or is
1574 * @context:opaque user context passed to callback
1575 * @sa_query:query context, used to cancel query
1577 * Send a Path Record Get query to the SA to look up a path. The
1578 * callback function will be called when the query completes (or
1579 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
1581 * occurred sending the query. The resp parameter of the callback is
1582 * only valid if status is 0.
1584 * If the return value of ib_sa_path_rec_get() is negative, it is an
1585 * error code. Otherwise it is a query ID that can be used to cancel
1588 int ib_sa_path_rec_get(struct ib_sa_client *client,
1589 struct ib_device *device, u8 port_num,
1590 struct sa_path_rec *rec,
1591 ib_sa_comp_mask comp_mask,
1592 int timeout_ms, gfp_t gfp_mask,
1593 void (*callback)(int status,
1594 struct sa_path_rec *resp,
1597 struct ib_sa_query **sa_query)
1599 struct ib_sa_path_query *query;
1600 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1601 struct ib_sa_port *port;
1602 struct ib_mad_agent *agent;
1603 struct ib_sa_mad *mad;
1604 enum opa_pr_supported status;
1610 if ((rec->rec_type != SA_PATH_REC_TYPE_IB) &&
1611 (rec->rec_type != SA_PATH_REC_TYPE_OPA))
1614 port = &sa_dev->port[port_num - sa_dev->start_port];
1615 agent = port->agent;
1617 query = kzalloc(sizeof(*query), gfp_mask);
1621 query->sa_query.port = port;
1622 if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
1623 status = opa_pr_query_possible(client, device, port_num, rec);
1624 if (status == PR_NOT_SUPPORTED) {
1627 } else if (status == PR_OPA_SUPPORTED) {
1628 query->sa_query.flags |= IB_SA_QUERY_OPA;
1631 kmalloc(sizeof(*query->conv_pr), gfp_mask);
1632 if (!query->conv_pr) {
1639 ret = alloc_mad(&query->sa_query, gfp_mask);
1643 ib_sa_client_get(client);
1644 query->sa_query.client = client;
1645 query->callback = callback;
1646 query->context = context;
1648 mad = query->sa_query.mad_buf->mad;
1649 init_mad(&query->sa_query, agent);
1651 query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
1652 query->sa_query.release = ib_sa_path_rec_release;
1653 mad->mad_hdr.method = IB_MGMT_METHOD_GET;
1654 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);
1655 mad->sa_hdr.comp_mask = comp_mask;
1657 if (query->sa_query.flags & IB_SA_QUERY_OPA) {
1658 ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
1660 } else if (query->conv_pr) {
1661 sa_convert_path_opa_to_ib(query->conv_pr, rec);
1662 ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
1663 query->conv_pr, mad->data);
1665 ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
1669 *sa_query = &query->sa_query;
1671 query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
1672 query->sa_query.mad_buf->context[1] = (query->conv_pr) ?
1673 query->conv_pr : rec;
1675 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1683 ib_sa_client_put(query->sa_query.client);
1684 free_mad(&query->sa_query);
1686 kfree(query->conv_pr);
1691 EXPORT_SYMBOL(ib_sa_path_rec_get);
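/*
 * Editorial sketch (not part of the original file): issuing a PathRecord
 * query the way in-kernel users typically do.  The callback, function and
 * parameter names are hypothetical; only SGID/DGID/PKEY/NUMB_PATH are filled
 * in for brevity.
 */
#if 0
static void my_path_callback(int status, struct sa_path_rec *resp, void *ctx)
{
	if (status)
		pr_debug("path query failed: %d\n", status);
	/* resp is only valid when status == 0 */
}

static int my_path_query(struct ib_sa_client *client, struct ib_device *dev,
			 u8 port_num, union ib_gid *sgid, union ib_gid *dgid,
			 __be16 pkey, struct ib_sa_query **sa_query)
{
	struct sa_path_rec rec = {};

	rec.rec_type  = SA_PATH_REC_TYPE_IB;
	rec.sgid      = *sgid;
	rec.dgid      = *dgid;
	rec.pkey      = pkey;
	rec.numb_path = 1;

	return ib_sa_path_rec_get(client, dev, port_num, &rec,
				  IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_DGID |
				  IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH,
				  1000, GFP_KERNEL,
				  my_path_callback, NULL, sa_query);
}
#endif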
1693 static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
1695 struct ib_sa_mad *mad)
1697 struct ib_sa_service_query *query =
1698 container_of(sa_query, struct ib_sa_service_query, sa_query);
1701 struct ib_sa_service_rec rec;
1703 ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
1705 query->callback(status, &rec, query->context);
1707 query->callback(status, NULL, query->context);
1710 static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
1712 kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
1716 * ib_sa_service_rec_query - Start Service Record operation
1718 * @device:device to send request on
1719 * @port_num: port number to send request on
1720 * @method:SA method - should be get, set, or delete
1721 * @rec:Service Record to send in request
1722 * @comp_mask:component mask to send in request
1723 * @timeout_ms:time to wait for response
1724 * @gfp_mask:GFP mask to use for internal allocations
1725 * @callback:function called when request completes, times out or is
1727 * @context:opaque user context passed to callback
1728 * @sa_query:request context, used to cancel request
1730 * Send a Service Record set/get/delete to the SA to register,
1731 * unregister or query a service record.
1732 * The callback function will be called when the request completes (or
1733 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
1735 * occurred sending the query. The resp parameter of the callback is
1736 * only valid if status is 0.
1738 * If the return value of ib_sa_service_rec_query() is negative, it is an
1739 * error code. Otherwise it is a request ID that can be used to cancel
1742 int ib_sa_service_rec_query(struct ib_sa_client *client,
1743 struct ib_device *device, u8 port_num, u8 method,
1744 struct ib_sa_service_rec *rec,
1745 ib_sa_comp_mask comp_mask,
1746 int timeout_ms, gfp_t gfp_mask,
1747 void (*callback)(int status,
1748 struct ib_sa_service_rec *resp,
1751 struct ib_sa_query **sa_query)
1753 struct ib_sa_service_query *query;
1754 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1755 struct ib_sa_port *port;
1756 struct ib_mad_agent *agent;
1757 struct ib_sa_mad *mad;
1763 port = &sa_dev->port[port_num - sa_dev->start_port];
1764 agent = port->agent;
1766 if (method != IB_MGMT_METHOD_GET &&
1767 method != IB_MGMT_METHOD_SET &&
1768 method != IB_SA_METHOD_DELETE)
1771 query = kzalloc(sizeof(*query), gfp_mask);
1775 query->sa_query.port = port;
1776 ret = alloc_mad(&query->sa_query, gfp_mask);
1780 ib_sa_client_get(client);
1781 query->sa_query.client = client;
1782 query->callback = callback;
1783 query->context = context;
1785 mad = query->sa_query.mad_buf->mad;
1786 init_mad(&query->sa_query, agent);
1788 query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
1789 query->sa_query.release = ib_sa_service_rec_release;
1790 mad->mad_hdr.method = method;
1791 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
1792 mad->sa_hdr.comp_mask = comp_mask;
1794 ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
1797 *sa_query = &query->sa_query;
1799 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1807 ib_sa_client_put(query->sa_query.client);
1808 free_mad(&query->sa_query);
1814 EXPORT_SYMBOL(ib_sa_service_rec_query);
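/*
 * Editorial sketch (not part of the original file): registering a service
 * record with the SA.  The callback/function names and the service ID value
 * are hypothetical; IB_MGMT_METHOD_SET registers, IB_SA_METHOD_DELETE removes.
 */
#if 0
static void my_service_callback(int status, struct ib_sa_service_rec *resp,
				void *context)
{
	/* status == 0 means the SA accepted the set/get/delete */
}

static int my_register_service(struct ib_sa_client *client,
			       struct ib_device *device, u8 port_num,
			       struct ib_sa_query **sa_query)
{
	struct ib_sa_service_rec rec = {};

	rec.id = cpu_to_be64(0x1000ULL);	/* hypothetical service ID */

	return ib_sa_service_rec_query(client, device, port_num,
				       IB_MGMT_METHOD_SET, &rec,
				       IB_SA_SERVICE_REC_SERVICE_ID,
				       1000, GFP_KERNEL,
				       my_service_callback, NULL, sa_query);
}
#endif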
1816 static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
1818 struct ib_sa_mad *mad)
1820 struct ib_sa_mcmember_query *query =
1821 container_of(sa_query, struct ib_sa_mcmember_query, sa_query);
1824 struct ib_sa_mcmember_rec rec;
1826 ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
1828 query->callback(status, &rec, query->context);
1830 query->callback(status, NULL, query->context);
1833 static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
1835 kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
1838 int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
1839 struct ib_device *device, u8 port_num,
1841 struct ib_sa_mcmember_rec *rec,
1842 ib_sa_comp_mask comp_mask,
1843 int timeout_ms, gfp_t gfp_mask,
1844 void (*callback)(int status,
1845 struct ib_sa_mcmember_rec *resp,
1848 struct ib_sa_query **sa_query)
1850 struct ib_sa_mcmember_query *query;
1851 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1852 struct ib_sa_port *port;
1853 struct ib_mad_agent *agent;
1854 struct ib_sa_mad *mad;
1860 port = &sa_dev->port[port_num - sa_dev->start_port];
1861 agent = port->agent;
1863 query = kzalloc(sizeof(*query), gfp_mask);
1867 query->sa_query.port = port;
1868 ret = alloc_mad(&query->sa_query, gfp_mask);
1872 ib_sa_client_get(client);
1873 query->sa_query.client = client;
1874 query->callback = callback;
1875 query->context = context;
1877 mad = query->sa_query.mad_buf->mad;
1878 init_mad(&query->sa_query, agent);
1880 query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
1881 query->sa_query.release = ib_sa_mcmember_rec_release;
1882 mad->mad_hdr.method = method;
1883 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
1884 mad->sa_hdr.comp_mask = comp_mask;
1886 ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
1889 *sa_query = &query->sa_query;
1891 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1899 ib_sa_client_put(query->sa_query.client);
1900 free_mad(&query->sa_query);
1907 /* Support GuidInfoRecord */
1908 static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
1910 struct ib_sa_mad *mad)
1912 struct ib_sa_guidinfo_query *query =
1913 container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);
1916 struct ib_sa_guidinfo_rec rec;
1918 ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
1920 query->callback(status, &rec, query->context);
1922 query->callback(status, NULL, query->context);
1925 static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
1927 kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
1930 int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
1931 struct ib_device *device, u8 port_num,
1932 struct ib_sa_guidinfo_rec *rec,
1933 ib_sa_comp_mask comp_mask, u8 method,
1934 int timeout_ms, gfp_t gfp_mask,
1935 void (*callback)(int status,
1936 struct ib_sa_guidinfo_rec *resp,
1939 struct ib_sa_query **sa_query)
1941 struct ib_sa_guidinfo_query *query;
1942 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1943 struct ib_sa_port *port;
1944 struct ib_mad_agent *agent;
1945 struct ib_sa_mad *mad;
1951 if (method != IB_MGMT_METHOD_GET &&
1952 method != IB_MGMT_METHOD_SET &&
1953 method != IB_SA_METHOD_DELETE) {
1957 port = &sa_dev->port[port_num - sa_dev->start_port];
1958 agent = port->agent;
1960 query = kzalloc(sizeof(*query), gfp_mask);
1964 query->sa_query.port = port;
1965 ret = alloc_mad(&query->sa_query, gfp_mask);
1969 ib_sa_client_get(client);
1970 query->sa_query.client = client;
1971 query->callback = callback;
1972 query->context = context;
1974 mad = query->sa_query.mad_buf->mad;
1975 init_mad(&query->sa_query, agent);
1977 query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
1978 query->sa_query.release = ib_sa_guidinfo_rec_release;
1980 mad->mad_hdr.method = method;
1981 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
1982 mad->sa_hdr.comp_mask = comp_mask;
1984 ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
1987 *sa_query = &query->sa_query;
1989 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1997 ib_sa_client_put(query->sa_query.client);
1998 free_mad(&query->sa_query);
2004 EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
2006 bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
2007 struct ib_device *device,
2010 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
2011 struct ib_sa_port *port;
2013 unsigned long flags;
2018 port = &sa_dev->port[port_num - sa_dev->start_port];
2020 spin_lock_irqsave(&port->classport_lock, flags);
2021 if ((port->classport_info.valid) &&
2022 (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_IB))
2023 ret = ib_get_cpi_capmask2(&port->classport_info.data.ib)
2024 & IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT;
2025 spin_unlock_irqrestore(&port->classport_lock, flags);
2028 EXPORT_SYMBOL(ib_sa_sendonly_fullmem_support);
2030 struct ib_classport_info_context {
2031 struct completion done;
2032 struct ib_sa_query *sa_query;
2035 static void ib_classportinfo_cb(void *context)
2037 struct ib_classport_info_context *cb_ctx = context;
2039 complete(&cb_ctx->done);
2042 static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
2044 struct ib_sa_mad *mad)
2046 unsigned long flags;
2047 struct ib_sa_classport_info_query *query =
2048 container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
2049 struct ib_sa_classport_cache *info = &sa_query->port->classport_info;
2052 if (sa_query->flags & IB_SA_QUERY_OPA) {
2053 struct opa_class_port_info rec;
2055 ib_unpack(opa_classport_info_rec_table,
2056 ARRAY_SIZE(opa_classport_info_rec_table),
2059 spin_lock_irqsave(&sa_query->port->classport_lock,
2061 if (!status && !info->valid) {
2062 memcpy(&info->data.opa, &rec,
2063 sizeof(info->data.opa));
2066 info->data.type = RDMA_CLASS_PORT_INFO_OPA;
2068 spin_unlock_irqrestore(&sa_query->port->classport_lock,
2072 struct ib_class_port_info rec;
2074 ib_unpack(ib_classport_info_rec_table,
2075 ARRAY_SIZE(ib_classport_info_rec_table),
2078 spin_lock_irqsave(&sa_query->port->classport_lock,
2080 if (!status && !info->valid) {
2081 memcpy(&info->data.ib, &rec,
2082 sizeof(info->data.ib));
2085 info->data.type = RDMA_CLASS_PORT_INFO_IB;
2087 spin_unlock_irqrestore(&sa_query->port->classport_lock,
2091 query->callback(query->context);
2094 static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
2096 kfree(container_of(sa_query, struct ib_sa_classport_info_query,
2100 static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
2102 void (*callback)(void *context),
2104 struct ib_sa_query **sa_query)
2106 struct ib_mad_agent *agent;
2107 struct ib_sa_classport_info_query *query;
2108 struct ib_sa_mad *mad;
2109 gfp_t gfp_mask = GFP_KERNEL;
2112 agent = port->agent;
2114 query = kzalloc(sizeof(*query), gfp_mask);
2118 query->sa_query.port = port;
2119 query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
2121 IB_SA_QUERY_OPA : 0;
2122 ret = alloc_mad(&query->sa_query, gfp_mask);
2126 query->callback = callback;
2127 query->context = context;
2129 mad = query->sa_query.mad_buf->mad;
2130 init_mad(&query->sa_query, agent);
2132 query->sa_query.callback = ib_sa_classport_info_rec_callback;
2133 query->sa_query.release = ib_sa_classport_info_rec_release;
2134 mad->mad_hdr.method = IB_MGMT_METHOD_GET;
2135 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
2136 mad->sa_hdr.comp_mask = 0;
2137 *sa_query = &query->sa_query;
2139 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
2147 free_mad(&query->sa_query);
2154 static void update_ib_cpi(struct work_struct *work)
2156 struct ib_sa_port *port =
2157 container_of(work, struct ib_sa_port, ib_cpi_work.work);
2158 struct ib_classport_info_context *cb_context;
2159 unsigned long flags;
	/* If the classport info is valid, nothing to do here */
2165 spin_lock_irqsave(&port->classport_lock, flags);
2166 if (port->classport_info.valid) {
2167 spin_unlock_irqrestore(&port->classport_lock, flags);
2170 spin_unlock_irqrestore(&port->classport_lock, flags);
2172 cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
2176 init_completion(&cb_context->done);
2178 ret = ib_sa_classport_info_rec_query(port, 3000,
2179 ib_classportinfo_cb, cb_context,
2180 &cb_context->sa_query);
2183 wait_for_completion(&cb_context->done);
2186 spin_lock_irqsave(&port->classport_lock, flags);
	/* If the classport info is still not valid, the query should have
	 * failed for some reason.  Retry issuing the query.
	 */
2191 if (!port->classport_info.valid) {
2192 port->classport_info.retry_cnt++;
2193 if (port->classport_info.retry_cnt <=
2194 IB_SA_CPI_MAX_RETRY_CNT) {
2195 unsigned long delay =
2196 msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);
2198 queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
2201 spin_unlock_irqrestore(&port->classport_lock, flags);
2207 static void send_handler(struct ib_mad_agent *agent,
2208 struct ib_mad_send_wc *mad_send_wc)
2210 struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
2211 unsigned long flags;
2213 if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}
2229 spin_lock_irqsave(&idr_lock, flags);
2230 idr_remove(&query_idr, query->id);
2231 spin_unlock_irqrestore(&idr_lock, flags);
2235 ib_sa_client_put(query->client);
2236 query->release(query);
2239 static void recv_handler(struct ib_mad_agent *mad_agent,
2240 struct ib_mad_send_buf *send_buf,
2241 struct ib_mad_recv_wc *mad_recv_wc)
2243 struct ib_sa_query *query;
2248 query = send_buf->context[0];
2249 if (query->callback) {
2250 if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
2251 query->callback(query,
2252 mad_recv_wc->recv_buf.mad->mad_hdr.status ?
2254 (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
2256 query->callback(query, -EIO, NULL);
2259 ib_free_recv_mad(mad_recv_wc);
2262 static void update_sm_ah(struct work_struct *work)
2264 struct ib_sa_port *port =
2265 container_of(work, struct ib_sa_port, update_task);
2266 struct ib_sa_sm_ah *new_ah;
2267 struct ib_port_attr port_attr;
2268 struct rdma_ah_attr ah_attr;
2270 if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
2271 pr_warn("Couldn't query port\n");
2275 new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL);
2279 kref_init(&new_ah->ref);
2280 new_ah->src_path_mask = (1 << port_attr.lmc) - 1;
2282 new_ah->pkey_index = 0;
2283 if (ib_find_pkey(port->agent->device, port->port_num,
2284 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
2285 pr_err("Couldn't find index for default PKey\n");
2287 memset(&ah_attr, 0, sizeof(ah_attr));
2288 ah_attr.type = rdma_ah_find_type(port->agent->device,
2290 rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
2291 rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
2292 rdma_ah_set_port_num(&ah_attr, port->port_num);
2293 if (port_attr.grh_required) {
2294 rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
2296 rdma_ah_set_subnet_prefix(&ah_attr,
2297 cpu_to_be64(port_attr.subnet_prefix));
2298 rdma_ah_set_interface_id(&ah_attr,
2299 cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
2302 new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr);
2303 if (IS_ERR(new_ah->ah)) {
2304 pr_warn("Couldn't create new SM AH\n");
2309 spin_lock_irq(&port->ah_lock);
2311 kref_put(&port->sm_ah->ref, free_sm_ah);
2312 port->sm_ah = new_ah;
2313 spin_unlock_irq(&port->ah_lock);
2316 static void ib_sa_event(struct ib_event_handler *handler,
2317 struct ib_event *event)
2319 if (event->event == IB_EVENT_PORT_ERR ||
2320 event->event == IB_EVENT_PORT_ACTIVE ||
2321 event->event == IB_EVENT_LID_CHANGE ||
2322 event->event == IB_EVENT_PKEY_CHANGE ||
2323 event->event == IB_EVENT_SM_CHANGE ||
2324 event->event == IB_EVENT_CLIENT_REREGISTER) {
2325 unsigned long flags;
2326 struct ib_sa_device *sa_dev =
2327 container_of(handler, typeof(*sa_dev), event_handler);
2328 u8 port_num = event->element.port_num - sa_dev->start_port;
2329 struct ib_sa_port *port = &sa_dev->port[port_num];
2331 if (!rdma_cap_ib_sa(handler->device, port->port_num))
2334 spin_lock_irqsave(&port->ah_lock, flags);
2336 kref_put(&port->sm_ah->ref, free_sm_ah);
2338 spin_unlock_irqrestore(&port->ah_lock, flags);
2340 if (event->event == IB_EVENT_SM_CHANGE ||
2341 event->event == IB_EVENT_CLIENT_REREGISTER ||
2342 event->event == IB_EVENT_LID_CHANGE ||
2343 event->event == IB_EVENT_PORT_ACTIVE) {
2344 unsigned long delay =
2345 msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);
2347 spin_lock_irqsave(&port->classport_lock, flags);
2348 port->classport_info.valid = false;
2349 port->classport_info.retry_cnt = 0;
2350 spin_unlock_irqrestore(&port->classport_lock, flags);
2351 queue_delayed_work(ib_wq,
2352 &port->ib_cpi_work, delay);
2354 queue_work(ib_wq, &sa_dev->port[port_num].update_task);
2358 static void ib_sa_add_one(struct ib_device *device)
2360 struct ib_sa_device *sa_dev;
2364 s = rdma_start_port(device);
2365 e = rdma_end_port(device);
2367 sa_dev = kzalloc(sizeof *sa_dev +
2368 (e - s + 1) * sizeof (struct ib_sa_port),
2373 sa_dev->start_port = s;
2374 sa_dev->end_port = e;
2376 for (i = 0; i <= e - s; ++i) {
2377 spin_lock_init(&sa_dev->port[i].ah_lock);
2378 if (!rdma_cap_ib_sa(device, i + 1))
2381 sa_dev->port[i].sm_ah = NULL;
2382 sa_dev->port[i].port_num = i + s;
2384 spin_lock_init(&sa_dev->port[i].classport_lock);
2385 sa_dev->port[i].classport_info.valid = false;
2387 sa_dev->port[i].agent =
2388 ib_register_mad_agent(device, i + s, IB_QPT_GSI,
2389 NULL, 0, send_handler,
2390 recv_handler, sa_dev, 0);
2391 if (IS_ERR(sa_dev->port[i].agent))
2394 INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
2395 INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
2404 ib_set_client_data(device, &sa_client, sa_dev);
2407 * We register our event handler after everything is set up,
2408 * and then update our cached info after the event handler is
2409 * registered to avoid any problems if a port changes state
2410 * during our initialization.
2413 INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
2414 if (ib_register_event_handler(&sa_dev->event_handler))
2417 for (i = 0; i <= e - s; ++i) {
2418 if (rdma_cap_ib_sa(device, i + 1))
2419 update_sm_ah(&sa_dev->port[i].update_task);
2426 if (rdma_cap_ib_sa(device, i + 1))
2427 ib_unregister_mad_agent(sa_dev->port[i].agent);
2434 static void ib_sa_remove_one(struct ib_device *device, void *client_data)
2436 struct ib_sa_device *sa_dev = client_data;
2442 ib_unregister_event_handler(&sa_dev->event_handler);
2443 flush_workqueue(ib_wq);
2445 for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
2446 if (rdma_cap_ib_sa(device, i + 1)) {
2447 cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
2448 ib_unregister_mad_agent(sa_dev->port[i].agent);
2449 if (sa_dev->port[i].sm_ah)
2450 kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
2458 int ib_sa_init(void)
2462 get_random_bytes(&tid, sizeof tid);
2464 atomic_set(&ib_nl_sa_request_seq, 0);
2466 ret = ib_register_client(&sa_client);
2468 pr_err("Couldn't register ib_sa client\n");
2474 pr_err("Couldn't initialize multicast handling\n");
2478 ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
2484 INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);
2491 ib_unregister_client(&sa_client);
2496 void ib_sa_cleanup(void)
2498 cancel_delayed_work(&ib_nl_timed_work);
2499 flush_workqueue(ib_nl_wq);
2500 destroy_workqueue(ib_nl_wq);
2502 ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}