// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/igmp.h>
#include <linux/xarray.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/netevent.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip_fib.h>
#include <net/ip6_route.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

#include "core_priv.h"
#include "cma_priv.h"
#include "cma_trace.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18
#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP

static const char * const cma_events[] = {
	[RDMA_CM_EVENT_ADDR_RESOLVED]	 = "address resolved",
	[RDMA_CM_EVENT_ADDR_ERROR]	 = "address error",
	[RDMA_CM_EVENT_ROUTE_RESOLVED]	 = "route resolved",
	[RDMA_CM_EVENT_ROUTE_ERROR]	 = "route error",
	[RDMA_CM_EVENT_CONNECT_REQUEST]	 = "connect request",
	[RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
	[RDMA_CM_EVENT_CONNECT_ERROR]	 = "connect error",
	[RDMA_CM_EVENT_UNREACHABLE]	 = "unreachable",
	[RDMA_CM_EVENT_REJECTED]	 = "rejected",
	[RDMA_CM_EVENT_ESTABLISHED]	 = "established",
	[RDMA_CM_EVENT_DISCONNECTED]	 = "disconnected",
	[RDMA_CM_EVENT_DEVICE_REMOVAL]	 = "device removal",
	[RDMA_CM_EVENT_MULTICAST_JOIN]	 = "multicast join",
	[RDMA_CM_EVENT_MULTICAST_ERROR]	 = "multicast error",
	[RDMA_CM_EVENT_ADDR_CHANGE]	 = "address change",
	[RDMA_CM_EVENT_TIMEWAIT_EXIT]	 = "timewait exit",
};

static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
			      enum ib_gid_type gid_type);

const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
			cma_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(rdma_event_msg);

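/*
 * Example (sketch, not part of the original file): a ULP typically feeds
 * rdma_event_msg() into its logging from the cm_id event handler; the
 * handler name below is hypothetical.
 *
 *	static int my_cm_handler(struct rdma_cm_id *id,
 *				 struct rdma_cm_event *event)
 *	{
 *		pr_debug("cma event %s, status %d\n",
 *			 rdma_event_msg(event->event), event->status);
 *		return 0;
 *	}
 */
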
const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
						int reason)
{
	if (rdma_ib_or_roce(id->device, id->port_num))
		return ibcm_reject_msg(reason);

	if (rdma_protocol_iwarp(id->device, id->port_num))
		return iwcm_reject_msg(reason);

	WARN_ON_ONCE(1);
	return "unrecognized transport";
}
EXPORT_SYMBOL(rdma_reject_msg);

/**
 * rdma_is_consumer_reject - return true if the consumer rejected the connect
 *                           request.
 * @id: Communication identifier that received the REJECT event.
 * @reason: Value returned in the REJECT event status field.
 */
static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
{
	if (rdma_ib_or_roce(id->device, id->port_num))
		return reason == IB_CM_REJ_CONSUMER_DEFINED;

	if (rdma_protocol_iwarp(id->device, id->port_num))
		return reason == -ECONNREFUSED;

	WARN_ON_ONCE(1);
	return false;
}

const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
				      struct rdma_cm_event *ev, u8 *data_len)
{
	const void *p;

	if (rdma_is_consumer_reject(id, ev->status)) {
		*data_len = ev->param.conn.private_data_len;
		p = ev->param.conn.private_data;
	} else {
		*data_len = 0;
		p = NULL;
	}
	return p;
}
EXPORT_SYMBOL(rdma_consumer_reject_data);

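/*
 * Example (sketch): on an RDMA_CM_EVENT_REJECTED event a consumer can
 * recover any peer-supplied reject payload:
 *
 *	u8 len;
 *	const void *data = rdma_consumer_reject_data(id, event, &len);
 *
 *	if (data)
 *		;	// 'len' bytes of private reject data are valid here
 */
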
/**
 * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id.
 * @id: Communication Identifier
 */
struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device->node_type == RDMA_NODE_RNIC)
		return id_priv->cm_id.iw;
	return NULL;
}
EXPORT_SYMBOL(rdma_iw_cm_id);

/**
 * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack.
 * @res: rdma resource tracking entry pointer
 */
struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
{
	struct rdma_id_private *id_priv =
		container_of(res, struct rdma_id_private, res);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_res_to_id);

static int cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct rb_root id_table = RB_ROOT;
/* Serialize operations of id_table tree */
static DEFINE_SPINLOCK(id_table_lock);
static struct workqueue_struct *cma_wq;
static unsigned int cma_pernet_id;

struct cma_pernet {
	struct xarray tcp_ps;
	struct xarray udp_ps;
	struct xarray ipoib_ps;
	struct xarray ib_ps;
};

static struct cma_pernet *cma_pernet(struct net *net)
{
	return net_generic(net, cma_pernet_id);
}

static
struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps)
{
	struct cma_pernet *pernet = cma_pernet(net);

	switch (ps) {
	case RDMA_PS_TCP:
		return &pernet->tcp_ps;
	case RDMA_PS_UDP:
		return &pernet->udp_ps;
	case RDMA_PS_IPOIB:
		return &pernet->ipoib_ps;
	case RDMA_PS_IB:
		return &pernet->ib_ps;
	default:
		return NULL;
	}
}

struct id_table_entry {
	struct list_head id_list;
	struct rb_node rb_node;
};

struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	refcount_t		refcount;
	struct list_head	id_list;
	enum ib_gid_type	*default_gid_type;
	u8			*default_roce_tos;
};

struct rdma_bind_list {
	enum rdma_ucm_port_space ps;
	struct hlist_head	owners;
	unsigned short		port;
};

static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps,
			struct rdma_bind_list *bind_list, int snum)
{
	struct xarray *xa = cma_pernet_xa(net, ps);

	return xa_insert(xa, snum, bind_list, GFP_KERNEL);
}

static struct rdma_bind_list *cma_ps_find(struct net *net,
					  enum rdma_ucm_port_space ps, int snum)
{
	struct xarray *xa = cma_pernet_xa(net, ps);

	return xa_load(xa, snum);
}

static void cma_ps_remove(struct net *net, enum rdma_ucm_port_space ps,
			  int snum)
{
	struct xarray *xa = cma_pernet_xa(net, ps);

	xa_erase(xa, snum);
}

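/*
 * Example (sketch): each per-namespace port space is an xarray indexed
 * directly by the port number, so binding a port reduces to one insert:
 *
 *	err = cma_ps_alloc(net, RDMA_PS_TCP, bind_list, snum);
 *	// xa_insert() returns -EBUSY if 'snum' is already bound
 */
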
enum {
	CMA_OPTION_AFONLY,
};

void cma_dev_get(struct cma_device *cma_dev)
{
	refcount_inc(&cma_dev->refcount);
}

void cma_dev_put(struct cma_device *cma_dev)
{
	if (refcount_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
					     void *cookie)
{
	struct cma_device *cma_dev;
	struct cma_device *found_cma_dev = NULL;

	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list)
		if (filter(cma_dev->device, cookie)) {
			found_cma_dev = cma_dev;
			break;
		}

	if (found_cma_dev)
		cma_dev_get(found_cma_dev);
	mutex_unlock(&lock);
	return found_cma_dev;
}

int cma_get_default_gid_type(struct cma_device *cma_dev,
			     u32 port)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_gid_type(struct cma_device *cma_dev,
			     u32 port,
			     enum ib_gid_type default_gid_type)
{
	unsigned long supported_gids;

	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	if (default_gid_type == IB_GID_TYPE_IB &&
	    rdma_protocol_roce_eth_encap(cma_dev->device, port))
		default_gid_type = IB_GID_TYPE_ROCE;

	supported_gids = roce_gid_type_mask_support(cma_dev->device, port);

	if (!(supported_gids & 1 << default_gid_type))
		return -EINVAL;

	cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
		default_gid_type;

	return 0;
}

int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port,
			     u8 default_roce_tos)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] =
		default_roce_tos;

	return 0;
}

struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
{
	return cma_dev->device;
}

/*
 * Device removal can occur at any time, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */

struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *sa_mc;
		struct {
			struct work_struct work;
			struct rdma_cm_event event;
		} iboe_join;
	};
	struct list_head	list;
	void			*context;
	struct sockaddr_storage	addr;
	u8			join_state;
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum rdma_cm_state	old_state;
	enum rdma_cm_state	new_state;
	struct rdma_cm_event	event;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

#define CMA_VERSION 0x00
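
/*
 * cma_hdr is the small wire header that rdma_cm carries in the IB CM
 * REQ private data so the passive side can recover the IP addressing;
 * cma_get_ip_ver()/cma_set_ip_ver() below pack the IP version into the
 * high nibble of ip_version.
 */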

struct cma_req_info {
	struct sockaddr_storage listen_addr_storage;
	struct sockaddr_storage src_addr_storage;
	struct ib_device *device;
	union ib_gid local_gid;
	__be64 service_id;
	int port;
	bool has_gid;
	u16 pkey;
};

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum rdma_cm_state comp, enum rdma_cm_state exch)
{
	unsigned long flags;
	int ret;

	/*
	 * The FSM uses a funny double locking where state is protected by both
	 * the handler_mutex and the spinlock. State is not allowed to change
	 * to/from a handler_mutex protected value without also holding
	 * handler_mutex.
	 */
	if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT)
		lockdep_assert_held(&id_priv->handler_mutex);

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

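/*
 * Example (sketch): callers use cma_comp_exch() as a guarded FSM
 * transition, e.g.:
 *
 *	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
 *			   RDMA_CM_ADDR_RESOLVED))
 *		return;	// another thread already moved the state
 */
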
static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *)&id_priv->id.route.addr.src_addr;
}

static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *)&id_priv->id.route.addr.dst_addr;
}

static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
{
	struct in_device *in_dev = NULL;

	if (ndev) {
		rtnl_lock();
		in_dev = __in_dev_get_rtnl(ndev);
		if (in_dev) {
			if (join)
				ip_mc_inc_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
			else
				ip_mc_dec_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
		}
		rtnl_unlock();
	}
	return (in_dev) ? 0 : -ENODEV;
}

static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa,
				 struct id_table_entry *entry_b)
{
	struct rdma_id_private *id_priv = list_first_entry(
		&entry_b->id_list, struct rdma_id_private, id_list_entry);
	int ifindex_b = id_priv->id.route.addr.dev_addr.bound_dev_if;
	struct sockaddr *sb = cma_dst_addr(id_priv);

	if (ifindex_a != ifindex_b)
		return (ifindex_a > ifindex_b) ? 1 : -1;

	if (sa->sa_family != sb->sa_family)
		return sa->sa_family - sb->sa_family;

	if (sa->sa_family == AF_INET)
		return memcmp((char *)&((struct sockaddr_in *)sa)->sin_addr,
			      (char *)&((struct sockaddr_in *)sb)->sin_addr,
			      sizeof(((struct sockaddr_in *)sa)->sin_addr));

	return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
			     &((struct sockaddr_in6 *)sb)->sin6_addr);
}

static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv)
{
	struct rb_node **new, *parent = NULL;
	struct id_table_entry *this, *node;
	unsigned long flags;
	int result;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	spin_lock_irqsave(&id_table_lock, flags);
	new = &id_table.rb_node;
	while (*new) {
		this = container_of(*new, struct id_table_entry, rb_node);
		result = compare_netdev_and_ip(
			node_id_priv->id.route.addr.dev_addr.bound_dev_if,
			cma_dst_addr(node_id_priv), this);

		parent = *new;
		if (result < 0)
			new = &((*new)->rb_left);
		else if (result > 0)
			new = &((*new)->rb_right);
		else {
			list_add_tail(&node_id_priv->id_list_entry,
				      &this->id_list);
			kfree(node);
			goto unlock;
		}
	}

	INIT_LIST_HEAD(&node->id_list);
	list_add_tail(&node_id_priv->id_list_entry, &node->id_list);

	rb_link_node(&node->rb_node, parent, new);
	rb_insert_color(&node->rb_node, &id_table);

unlock:
	spin_unlock_irqrestore(&id_table_lock, flags);
	return 0;
}

static struct id_table_entry *
node_from_ndev_ip(struct rb_root *root, int ifindex, struct sockaddr *sa)
{
	struct rb_node *node = root->rb_node;
	struct id_table_entry *data;
	int result;

	while (node) {
		data = container_of(node, struct id_table_entry, rb_node);
		result = compare_netdev_and_ip(ifindex, sa, data);
		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return data;
	}

	return NULL;
}

static void cma_remove_id_from_tree(struct rdma_id_private *id_priv)
{
	struct id_table_entry *data;
	unsigned long flags;

	spin_lock_irqsave(&id_table_lock, flags);
	if (list_empty(&id_priv->id_list_entry))
		goto out;

	data = node_from_ndev_ip(&id_table,
				 id_priv->id.route.addr.dev_addr.bound_dev_if,
				 cma_dst_addr(id_priv));
	if (!data)
		goto out;

	list_del_init(&id_priv->id_list_entry);
	if (list_empty(&data->id_list)) {
		rb_erase(&data->rb_node, &id_table);
		kfree(data);
	}
out:
	spin_unlock_irqrestore(&id_table_lock, flags);
}

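/*
 * Example (sketch): id_table is an rb-tree keyed by (bound ifindex,
 * destination IP), and cm_ids sharing a key hang off one
 * id_table_entry. Under id_table_lock, all ids towards 'daddr' on
 * 'ifindex' can be walked as:
 *
 *	entry = node_from_ndev_ip(&id_table, ifindex, daddr);
 *	if (entry)
 *		list_for_each_entry(id_priv, &entry->id_list, id_list_entry)
 *			...;
 */
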
static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
			       struct cma_device *cma_dev)
{
	cma_dev_get(cma_dev);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->device_item, &cma_dev->id_list);

	trace_cm_id_attach(id_priv, cma_dev->device);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	_cma_attach_to_dev(id_priv, cma_dev);
	id_priv->gid_type =
		cma_dev->default_gid_type[id_priv->id.port_num -
					  rdma_start_port(cma_dev->device)];
}

static void cma_release_dev(struct rdma_id_private *id_priv)
{
	mutex_lock(&lock);
	list_del_init(&id_priv->device_item);
	cma_dev_put(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
	id_priv->id.device = NULL;
	if (id_priv->id.route.addr.dev_addr.sgid_attr) {
		rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
		id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
	}
	mutex_unlock(&lock);
}

static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
	return id_priv->id.route.addr.src_addr.ss_family;
}

static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	if (id_priv->qkey) {
		if (qkey && id_priv->qkey != qkey)
			return -EINVAL;
		return 0;
	}

	if (qkey) {
		id_priv->qkey = qkey;
		return 0;
	}

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
	case RDMA_PS_IB:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}

static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
	dev_addr->dev_type = ARPHRD_INFINIBAND;
	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
	ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
}

static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
	int ret;

	if (addr->sa_family != AF_IB) {
		ret = rdma_translate_ip(addr, dev_addr);
	} else {
		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
		ret = 0;
	}

	return ret;
}

static const struct ib_gid_attr *
cma_validate_port(struct ib_device *device, u32 port,
		  enum ib_gid_type gid_type,
		  union ib_gid *gid,
		  struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int bound_if_index = dev_addr->bound_dev_if;
	const struct ib_gid_attr *sgid_attr;
	int dev_type = dev_addr->dev_type;
	struct net_device *ndev = NULL;

	if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
		return ERR_PTR(-ENODEV);

	if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
		return ERR_PTR(-ENODEV);

	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
		return ERR_PTR(-ENODEV);

	if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
		ndev = dev_get_by_index(dev_addr->net, bound_if_index);
		if (!ndev)
			return ERR_PTR(-ENODEV);
	} else {
		gid_type = IB_GID_TYPE_IB;
	}

	sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
	if (ndev)
		dev_put(ndev);
	return sgid_attr;
}

static void cma_bind_sgid_attr(struct rdma_id_private *id_priv,
			       const struct ib_gid_attr *sgid_attr)
{
	WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr);
	id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr;
}

/**
 * cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute
 * based on source ip address.
 * @id_priv:	cm_id which should be bound to cma device
 *
 * cma_acquire_dev_by_src_ip() binds cm id to cma device, port and GID attribute
 * based on source IP address. It returns 0 on success or error code otherwise.
 * It is applicable to active and passive side cm_id.
 */
static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	union ib_gid gid, iboe_gid, *gidp;
	struct cma_device *cma_dev;
	enum ib_gid_type gid_type;
	int ret = -ENODEV;
	u32 port;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &iboe_gid);

	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof(gid));

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list) {
		rdma_for_each_port (cma_dev->device, port) {
			gidp = rdma_protocol_roce(cma_dev->device, port) ?
			       &iboe_gid : &gid;
			gid_type = cma_dev->default_gid_type[port - 1];
			sgid_attr = cma_validate_port(cma_dev->device, port,
						      gid_type, gidp, id_priv);
			if (!IS_ERR(sgid_attr)) {
				id_priv->id.port_num = port;
				cma_bind_sgid_attr(id_priv, sgid_attr);
				cma_attach_to_dev(id_priv, cma_dev);
				ret = 0;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&lock);
	return ret;
}

/**
 * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute
 * @id_priv:		cm id to bind to cma device
 * @listen_id_priv:	listener cm id to match against
 * @req:		Pointer to req structure containing incoming
 *			request information
 * cma_ib_acquire_dev() acquires cma device, port and SGID attribute when
 * rdma device matches for listen_id and incoming request. It also verifies
 * that a GID table entry is present for the source address.
 * Returns 0 on success, or returns error code otherwise.
 */
static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
			      const struct rdma_id_private *listen_id_priv,
			      struct cma_req_info *req)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	enum ib_gid_type gid_type;
	union ib_gid gid;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	if (rdma_protocol_roce(req->device, req->port))
		rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
			    &gid);
	else
		memcpy(&gid, dev_addr->src_dev_addr +
		       rdma_addr_gid_offset(dev_addr), sizeof(gid));

	gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1];
	sgid_attr = cma_validate_port(req->device, req->port,
				      gid_type, &gid, id_priv);
	if (IS_ERR(sgid_attr))
		return PTR_ERR(sgid_attr);

	id_priv->id.port_num = req->port;
	cma_bind_sgid_attr(id_priv, sgid_attr);
	/* Need to acquire lock to protect against reader
	 * of cma_dev->id_list such as cma_netdev_callback() and
	 * cma_process_remove().
	 */
	mutex_lock(&lock);
	cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
	mutex_unlock(&lock);
	rdma_restrack_add(&id_priv->res);
	return 0;
}

static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
			      const struct rdma_id_private *listen_id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	struct cma_device *cma_dev;
	enum ib_gid_type gid_type;
	int ret = -ENODEV;
	union ib_gid gid;
	u32 port;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof(gid));

	mutex_lock(&lock);

	cma_dev = listen_id_priv->cma_dev;
	port = listen_id_priv->id.port_num;
	gid_type = listen_id_priv->gid_type;
	sgid_attr = cma_validate_port(cma_dev->device, port,
				      gid_type, &gid, id_priv);
	if (!IS_ERR(sgid_attr)) {
		id_priv->id.port_num = port;
		cma_bind_sgid_attr(id_priv, sgid_attr);
		ret = 0;
		goto out;
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		rdma_for_each_port (cma_dev->device, port) {
			if (listen_id_priv->cma_dev == cma_dev &&
			    listen_id_priv->id.port_num == port)
				continue;

			gid_type = cma_dev->default_gid_type[port - 1];
			sgid_attr = cma_validate_port(cma_dev->device, port,
						      gid_type, &gid, id_priv);
			if (!IS_ERR(sgid_attr)) {
				id_priv->id.port_num = port;
				cma_bind_sgid_attr(id_priv, sgid_attr);
				ret = 0;
				goto out;
			}
		}
	}

out:
	if (!ret) {
		cma_attach_to_dev(id_priv, cma_dev);
		rdma_restrack_add(&id_priv->res);
	}

	mutex_unlock(&lock);
	return ret;
}

/*
 * Select the source IB device and address to reach the destination IB address.
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct sockaddr_ib *addr;
	union ib_gid gid, sgid, *dgid;
	unsigned int p;
	u16 pkey, index;
	enum ib_port_state port_state;
	int ret;
	int i;

	cma_dev = NULL;
	addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
	dgid = (union ib_gid *) &addr->sib_addr;
	pkey = ntohs(addr->sib_pkey);

	mutex_lock(&lock);
	list_for_each_entry(cur_dev, &dev_list, list) {
		rdma_for_each_port (cur_dev->device, p) {
			if (!rdma_cap_af_ib(cur_dev->device, p))
				continue;

			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
				continue;

			if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
				continue;

			for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len;
			     ++i) {
				ret = rdma_query_gid(cur_dev->device, p, i,
						     &gid);
				if (ret)
					continue;

				if (!memcmp(&gid, dgid, sizeof(gid))) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
					goto found;
				}

				if (!cma_dev && (gid.global.subnet_prefix ==
						 dgid->global.subnet_prefix) &&
				    port_state == IB_PORT_ACTIVE) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
					goto found;
				}
			}
		}
	}
	mutex_unlock(&lock);
	return -ENODEV;

found:
	cma_attach_to_dev(id_priv, cma_dev);
	rdma_restrack_add(&id_priv->res);
	mutex_unlock(&lock);
	addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
	memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
	return 0;
}

static void cma_id_get(struct rdma_id_private *id_priv)
{
	refcount_inc(&id_priv->refcount);
}

static void cma_id_put(struct rdma_id_private *id_priv)
{
	if (refcount_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static struct rdma_id_private *
__rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
		 void *context, enum rdma_ucm_port_space ps,
		 enum ib_qp_type qp_type, const struct rdma_id_private *parent)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = RDMA_CM_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	id_priv->id.qp_type = qp_type;
	id_priv->tos_set = false;
	id_priv->timeout_set = false;
	id_priv->min_rnr_timer_set = false;
	id_priv->gid_type = IB_GID_TYPE_IB;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	refcount_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->device_item);
	INIT_LIST_HEAD(&id_priv->id_list_entry);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
	id_priv->id.route.addr.dev_addr.net = get_net(net);
	id_priv->seq_num &= 0x00ffffff;

	rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID);
	if (parent)
		rdma_restrack_parent_name(&id_priv->res, &parent->res);

	return id_priv;
}

struct rdma_cm_id *
__rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler,
			void *context, enum rdma_ucm_port_space ps,
			enum ib_qp_type qp_type, const char *caller)
{
	struct rdma_id_private *ret;

	ret = __rdma_create_id(net, event_handler, context, ps, qp_type, NULL);
	if (IS_ERR(ret))
		return ERR_CAST(ret);

	rdma_restrack_set_name(&ret->res, caller);
	return &ret->id;
}
EXPORT_SYMBOL(__rdma_create_kernel_id);

struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler,
				       void *context,
				       enum rdma_ucm_port_space ps,
				       enum ib_qp_type qp_type)
{
	struct rdma_id_private *ret;

	ret = __rdma_create_id(current->nsproxy->net_ns, event_handler, context,
			       ps, qp_type, NULL);
	if (IS_ERR(ret))
		return ERR_CAST(ret);

	rdma_restrack_set_name(&ret->res, NULL);
	return &ret->id;
}
EXPORT_SYMBOL(rdma_create_user_id);

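/*
 * Example (sketch): kernel ULPs normally go through the rdma_create_id()
 * wrapper macro, which passes KBUILD_MODNAME as the restrack name; the
 * handler and context below are hypothetical.
 *
 *	struct rdma_cm_id *id;
 *
 *	id = rdma_create_id(&init_net, my_cm_handler, my_ctx,
 *			    RDMA_PS_TCP, IB_QPT_RC);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 */
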
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device) {
		ret = -EINVAL;
		goto out_err;
	}

	qp_init_attr->port_num = id->port_num;
	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto out_err;
	}

	if (id->qp_type == IB_QPT_UD)
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto out_destroy;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	trace_cm_qp_create(id_priv, pd, qp_init_attr, 0);
	return 0;
out_destroy:
	ib_destroy_qp(qp);
out_err:
	trace_cm_qp_create(id_priv, pd, qp_init_attr, ret);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	trace_cm_qp_destroy(id_priv);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);

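/*
 * Example (sketch, error handling elided): after route resolution a ULP
 * can let rdma_cm drive the QP state machine; 'cq' and 'pd' are assumed
 * to exist already.
 *
 *	struct ib_qp_init_attr attr = {
 *		.qp_type = IB_QPT_RC,
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap = { .max_send_wr = 16, .max_recv_wr = 16,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *	};
 *
 *	err = rdma_create_qp(id, pd, &attr);
 */
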
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;
	u16 pkey;

	if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
		pkey = 0xffff;
	else
		pkey = ib_addr_get_pkey(dev_addr);

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (id_priv->id.qp_type == IB_QPT_UD) {
		ret = cma_set_qkey(id_priv, 0);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);

		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		qp_attr->port_num = id_priv->id.port_num;
		*qp_attr_mask |= IB_QP_PORT;
	} else {
		ret = -ENOSYS;
	}

	if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set)
		qp_attr->timeout = id_priv->timeout;

	if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set)
		qp_attr->min_rnr_timer = id_priv->min_rnr_timer;

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);

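/*
 * Example (sketch): a ULP that owns its QP (i.e. does not call
 * rdma_create_qp()) drives each transition by querying the attributes
 * first, then applying them:
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_RTR };
 *	int mask;
 *
 *	err = rdma_init_qp_attr(id, &attr, &mask);
 *	if (!err)
 *		err = ib_modify_qp(qp, &attr, mask);
 */
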
static inline bool cma_zero_addr(const struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr);
	case AF_IB:
		return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr);
	default:
		return false;
	}
}

static inline bool cma_loopback_addr(const struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_loopback(
			((struct sockaddr_in *)addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_loopback(
			&((struct sockaddr_in6 *)addr)->sin6_addr);
	case AF_IB:
		return ib_addr_loopback(
			&((struct sockaddr_ib *)addr)->sib_addr);
	default:
		return false;
	}
}

static inline bool cma_any_addr(const struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
{
	if (src->sa_family != dst->sa_family)
		return -1;

	switch (src->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *)src)->sin_addr.s_addr !=
		       ((struct sockaddr_in *)dst)->sin_addr.s_addr;
	case AF_INET6: {
		struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src;
		struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst;
		bool link_local;

		if (ipv6_addr_cmp(&src_addr6->sin6_addr,
				  &dst_addr6->sin6_addr))
			return 1;
		link_local = ipv6_addr_type(&dst_addr6->sin6_addr) &
			     IPV6_ADDR_LINKLOCAL;
		/* Link-local addresses must also match on scope_id */
		return link_local ? (src_addr6->sin6_scope_id !=
				     dst_addr6->sin6_scope_id) :
				    0;
	}

	default:
		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
				   &((struct sockaddr_ib *) dst)->sib_addr);
	}
}

static __be16 cma_port(const struct sockaddr *addr)
{
	struct sockaddr_ib *sib;

	switch (addr->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) addr)->sin_port;
	case AF_INET6:
		return ((struct sockaddr_in6 *) addr)->sin6_port;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		return htons((u16) (be64_to_cpu(sib->sib_sid) &
				    be64_to_cpu(sib->sib_sid_mask)));
	default:
		return 0;
	}
}

static inline int cma_any_port(const struct sockaddr *addr)
{
	return !cma_port(addr);
}

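/*
 * Note: for AF_IB the port is not a separate field -- it lives in the
 * low 16 bits of the 64-bit service ID, selected through sib_sid_mask,
 * which is why cma_port() above masks and then narrows the service ID.
 */
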
static void cma_save_ib_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     const struct rdma_cm_id *listen_id,
			     const struct sa_path_rec *path)
{
	struct sockaddr_ib *listen_ib, *ib;

	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
	if (src_addr) {
		ib = (struct sockaddr_ib *)src_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->sgid, 16);
			ib->sib_sid = path->service_id;
			ib->sib_scope_id = 0;
		} else {
			ib->sib_pkey = listen_ib->sib_pkey;
			ib->sib_flowinfo = listen_ib->sib_flowinfo;
			ib->sib_addr = listen_ib->sib_addr;
			ib->sib_sid = listen_ib->sib_sid;
			ib->sib_scope_id = listen_ib->sib_scope_id;
		}
		ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
	}
	if (dst_addr) {
		ib = (struct sockaddr_ib *)dst_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->dgid, 16);
		}
	}
}

static void cma_save_ip4_info(struct sockaddr_in *src_addr,
			      struct sockaddr_in *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{
	if (src_addr) {
		*src_addr = (struct sockaddr_in) {
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->dst_addr.ip4.addr,
			.sin_port = local_port,
		};
	}

	if (dst_addr) {
		*dst_addr = (struct sockaddr_in) {
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->src_addr.ip4.addr,
			.sin_port = hdr->port,
		};
	}
}

static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
			      struct sockaddr_in6 *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{
	if (src_addr) {
		*src_addr = (struct sockaddr_in6) {
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->dst_addr.ip6,
			.sin6_port = local_port,
		};
	}

	if (dst_addr) {
		*dst_addr = (struct sockaddr_in6) {
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->src_addr.ip6,
			.sin6_port = hdr->port,
		};
	}
}

static u16 cma_port_from_service_id(__be64 service_id)
{
	return (u16)be64_to_cpu(service_id);
}

static int cma_save_ip_info(struct sockaddr *src_addr,
			    struct sockaddr *dst_addr,
			    const struct ib_cm_event *ib_event,
			    __be64 service_id)
{
	struct cma_hdr *hdr;
	__be16 port;

	hdr = ib_event->private_data;
	if (hdr->cma_version != CMA_VERSION)
		return -EINVAL;

	port = htons(cma_port_from_service_id(service_id));

	switch (cma_get_ip_ver(hdr)) {
	case 4:
		cma_save_ip4_info((struct sockaddr_in *)src_addr,
				  (struct sockaddr_in *)dst_addr, hdr, port);
		break;
	case 6:
		cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
				  (struct sockaddr_in6 *)dst_addr, hdr, port);
		break;
	default:
		return -EAFNOSUPPORT;
	}

	return 0;
}

static int cma_save_net_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     const struct rdma_cm_id *listen_id,
			     const struct ib_cm_event *ib_event,
			     sa_family_t sa_family, __be64 service_id)
{
	if (sa_family == AF_IB) {
		if (ib_event->event == IB_CM_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id,
					 ib_event->param.req_rcvd.primary_path);
		else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
		return 0;
	}

	return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
}

4c21b5bc HE |
1501 | static int cma_save_req_info(const struct ib_cm_event *ib_event, |
1502 | struct cma_req_info *req) | |
1503 | { | |
1504 | const struct ib_cm_req_event_param *req_param = | |
1505 | &ib_event->param.req_rcvd; | |
1506 | const struct ib_cm_sidr_req_event_param *sidr_param = | |
1507 | &ib_event->param.sidr_req_rcvd; | |
1508 | ||
1509 | switch (ib_event->event) { | |
1510 | case IB_CM_REQ_RECEIVED: | |
1511 | req->device = req_param->listen_id->device; | |
1512 | req->port = req_param->port; | |
1513 | memcpy(&req->local_gid, &req_param->primary_path->sgid, | |
1514 | sizeof(req->local_gid)); | |
1515 | req->has_gid = true; | |
d3957b86 | 1516 | req->service_id = req_param->primary_path->service_id; |
ab3964ad | 1517 | req->pkey = be16_to_cpu(req_param->primary_path->pkey); |
84424a7f HE |
1518 | if (req->pkey != req_param->bth_pkey) |
1519 | pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n" | |
1520 | "RDMA CMA: in the future this may cause the request to be dropped\n", | |
1521 | req_param->bth_pkey, req->pkey); | |
4c21b5bc HE |
1522 | break; |
1523 | case IB_CM_SIDR_REQ_RECEIVED: | |
1524 | req->device = sidr_param->listen_id->device; | |
1525 | req->port = sidr_param->port; | |
1526 | req->has_gid = false; | |
1527 | req->service_id = sidr_param->service_id; | |
ab3964ad | 1528 | req->pkey = sidr_param->pkey; |
84424a7f HE |
1529 | if (req->pkey != sidr_param->bth_pkey) |
1530 | pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n" | |
1531 | "RDMA CMA: in the future this may cause the request to be dropped\n", | |
1532 | sidr_param->bth_pkey, req->pkey); | |
4c21b5bc HE |
1533 | break; |
1534 | default: | |
1535 | return -EINVAL; | |
1536 | } | |
1537 | ||
1538 | return 0; | |
1539 | } | |
1540 | ||
f887f2ac HE |
1541 | static bool validate_ipv4_net_dev(struct net_device *net_dev, |
1542 | const struct sockaddr_in *dst_addr, | |
1543 | const struct sockaddr_in *src_addr) | |
1544 | { | |
1545 | __be32 daddr = dst_addr->sin_addr.s_addr, | |
1546 | saddr = src_addr->sin_addr.s_addr; | |
1547 | struct fib_result res; | |
1548 | struct flowi4 fl4; | |
1549 | int err; | |
1550 | bool ret; | |
1551 | ||
1552 | if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || | |
1553 | ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) || | |
1554 | ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) || | |
1555 | ipv4_is_loopback(saddr)) | |
1556 | return false; | |
1557 | ||
1558 | memset(&fl4, 0, sizeof(fl4)); | |
eb83f502 | 1559 | fl4.flowi4_oif = net_dev->ifindex; |
f887f2ac HE |
1560 | fl4.daddr = daddr; |
1561 | fl4.saddr = saddr; | |
1562 | ||
1563 | rcu_read_lock(); | |
1564 | err = fib_lookup(dev_net(net_dev), &fl4, &res, 0); | |
d3632493 | 1565 | ret = err == 0 && FIB_RES_DEV(res) == net_dev; |
f887f2ac HE |
1566 | rcu_read_unlock(); |
1567 | ||
1568 | return ret; | |
1569 | } | |
1570 | ||
1571 | static bool validate_ipv6_net_dev(struct net_device *net_dev, | |
1572 | const struct sockaddr_in6 *dst_addr, | |
1573 | const struct sockaddr_in6 *src_addr) | |
1574 | { | |
1575 | #if IS_ENABLED(CONFIG_IPV6) | |
1576 | const int strict = ipv6_addr_type(&dst_addr->sin6_addr) & | |
1577 | IPV6_ADDR_LINKLOCAL; | |
1578 | struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr, | |
1579 | &src_addr->sin6_addr, net_dev->ifindex, | |
b75cc8f9 | 1580 | NULL, strict); |
f887f2ac HE |
1581 | bool ret; |
1582 | ||
1583 | if (!rt) | |
1584 | return false; | |
1585 | ||
1586 | ret = rt->rt6i_idev->dev == net_dev; | |
1587 | ip6_rt_put(rt); | |
1588 | ||
1589 | return ret; | |
1590 | #else | |
1591 | return false; | |
1592 | #endif | |
1593 | } | |
1594 | ||
1595 | static bool validate_net_dev(struct net_device *net_dev, | |
1596 | const struct sockaddr *daddr, | |
1597 | const struct sockaddr *saddr) | |
1598 | { | |
1599 | const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr; | |
1600 | const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr; | |
1601 | const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr; | |
1602 | const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr; | |
1603 | ||
1604 | switch (daddr->sa_family) { | |
1605 | case AF_INET: | |
1606 | return saddr->sa_family == AF_INET && | |
1607 | validate_ipv4_net_dev(net_dev, daddr4, saddr4); | |
1608 | ||
1609 | case AF_INET6: | |
1610 | return saddr->sa_family == AF_INET6 && | |
1611 | validate_ipv6_net_dev(net_dev, daddr6, saddr6); | |
1612 | ||
1613 | default: | |
1614 | return false; | |
1615 | } | |
1616 | } | |
1617 | ||
cee10433 PP |
1618 | static struct net_device * |
1619 | roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event) | |
1620 | { | |
1621 | const struct ib_gid_attr *sgid_attr = NULL; | |
adb4a57a | 1622 | struct net_device *ndev; |
cee10433 PP |
1623 | |
1624 | if (ib_event->event == IB_CM_REQ_RECEIVED) | |
1625 | sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr; | |
1626 | else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) | |
1627 | sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr; | |
1628 | ||
1629 | if (!sgid_attr) | |
1630 | return NULL; | |
adb4a57a PP |
1631 | |
1632 | rcu_read_lock(); | |
1633 | ndev = rdma_read_gid_attr_ndev_rcu(sgid_attr); | |
1634 | if (IS_ERR(ndev)) | |
1635 | ndev = NULL; | |
1636 | else | |
1637 | dev_hold(ndev); | |
1638 | rcu_read_unlock(); | |
1639 | return ndev; | |
cee10433 PP |
1640 | } |
1641 | ||
e7ff98ae | 1642 | static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event, |
2918c1a9 | 1643 | struct cma_req_info *req) |
4c21b5bc | 1644 | { |
2918c1a9 PP |
1645 | struct sockaddr *listen_addr = |
1646 | (struct sockaddr *)&req->listen_addr_storage; | |
1647 | struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage; | |
4c21b5bc HE |
1648 | struct net_device *net_dev; |
1649 | const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL; | |
1650 | int err; | |
1651 | ||
f887f2ac HE |
1652 | err = cma_save_ip_info(listen_addr, src_addr, ib_event, |
1653 | req->service_id); | |
4c21b5bc HE |
1654 | if (err) |
1655 | return ERR_PTR(err); | |
1656 | ||
cee10433 PP |
1657 | if (rdma_protocol_roce(req->device, req->port)) |
1658 | net_dev = roce_get_net_dev_by_cm_event(ib_event); | |
1659 | else | |
1660 | net_dev = ib_get_net_dev_by_params(req->device, req->port, | |
1661 | req->pkey, | |
1662 | gid, listen_addr); | |
4c21b5bc HE |
1663 | if (!net_dev) |
1664 | return ERR_PTR(-ENODEV); | |
1665 | ||
1666 | return net_dev; | |
1667 | } | |
1668 | ||
2253fc0c | 1669 | static enum rdma_ucm_port_space rdma_ps_from_service_id(__be64 service_id) |
4c21b5bc HE |
1670 | { |
1671 | return (be64_to_cpu(service_id) >> 16) & 0xffff; | |
1672 | } | |
1673 | ||
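cma_port_from_service_id() and rdma_ps_from_service_id() together invert the encoding used by rdma_get_service_id() further below: the low 16 bits of the 64-bit service ID carry the IP port and bits 16-31 carry the RDMA port space. A minimal sketch with illustrative values (RDMA_PS_TCP is 0x0106; the port number is arbitrary):

/* Illustrative only: build and decode a service ID by hand. */
__be64 sid = cpu_to_be64(((u64)0x0106 << 16) + 4791);

u16 port = (u16)be64_to_cpu(sid);                 /* 4791 */
u16 ps   = (be64_to_cpu(sid) >> 16) & 0xffff;     /* 0x0106, RDMA_PS_TCP */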
1674 | static bool cma_match_private_data(struct rdma_id_private *id_priv, | |
1675 | const struct cma_hdr *hdr) | |
1676 | { | |
1677 | struct sockaddr *addr = cma_src_addr(id_priv); | |
1678 | __be32 ip4_addr; | |
1679 | struct in6_addr ip6_addr; | |
1680 | ||
1681 | if (cma_any_addr(addr) && !id_priv->afonly) | |
1682 | return true; | |
1683 | ||
1684 | switch (addr->sa_family) { | |
1685 | case AF_INET: | |
1686 | ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr; | |
1687 | if (cma_get_ip_ver(hdr) != 4) | |
1688 | return false; | |
1689 | if (!cma_any_addr(addr) && | |
1690 | hdr->dst_addr.ip4.addr != ip4_addr) | |
1691 | return false; | |
1692 | break; | |
1693 | case AF_INET6: | |
1694 | ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr; | |
1695 | if (cma_get_ip_ver(hdr) != 6) | |
1696 | return false; | |
1697 | if (!cma_any_addr(addr) && | |
1698 | memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr))) | |
1699 | return false; | |
1700 | break; | |
1701 | case AF_IB: | |
1702 | return true; | |
1703 | default: | |
1704 | return false; | |
1705 | } | |
1706 | ||
1707 | return true; | |
1708 | } | |
1709 | ||
b8cab5da HE |
1710 | static bool cma_protocol_roce(const struct rdma_cm_id *id) |
1711 | { | |
1712 | struct ib_device *device = id->device; | |
1fb7f897 | 1713 | const u32 port_num = id->port_num ?: rdma_start_port(device); |
b8cab5da | 1714 | |
5ac08a34 | 1715 | return rdma_protocol_roce(device, port_num); |
b8cab5da HE |
1716 | } |
1717 | ||
78fb282b PP |
1718 | static bool cma_is_req_ipv6_ll(const struct cma_req_info *req) |
1719 | { | |
1720 | const struct sockaddr *daddr = | |
1721 | (const struct sockaddr *)&req->listen_addr_storage; | |
1722 | const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr; | |
1723 | ||
1724 | /* Returns true if the req is for IPv6 link local */ | |
1725 | return (daddr->sa_family == AF_INET6 && | |
1726 | (ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)); | |
1727 | } | |
1728 | ||
fac51590 MB |
1729 | static bool cma_match_net_dev(const struct rdma_cm_id *id, |
1730 | const struct net_device *net_dev, | |
78fb282b | 1731 | const struct cma_req_info *req) |
4c21b5bc | 1732 | { |
fac51590 | 1733 | const struct rdma_addr *addr = &id->route.addr; |
4c21b5bc HE |
1734 | |
1735 | if (!net_dev) | |
d274e45c | 1736 | /* This request is an AF_IB request */ |
78fb282b | 1737 | return (!id->port_num || id->port_num == req->port) && |
d274e45c | 1738 | (addr->src_addr.ss_family == AF_IB); |
4c21b5bc | 1739 | |
78fb282b PP |
1740 | /* |
1741 | * If the request is not for IPv6 link local, allow the request
1742 | * to match any netdevice of the single- or multi-port rdma device.
1743 | */ | |
1744 | if (!cma_is_req_ipv6_ll(req)) | |
1745 | return true; | |
643d213a PP |
1746 | /* |
1747 | * Net namespaces must match, and if the listener is listening
1748 | * on a specific netdevice then that netdevice must match as well.
1749 | */ | |
1750 | if (net_eq(dev_net(net_dev), addr->dev_addr.net) && | |
1751 | (!!addr->dev_addr.bound_dev_if == | |
1752 | (addr->dev_addr.bound_dev_if == net_dev->ifindex))) | |
1753 | return true; | |
1754 | else | |
1755 | return false; | |
4c21b5bc HE |
1756 | } |
1757 | ||
1758 | static struct rdma_id_private *cma_find_listener( | |
1759 | const struct rdma_bind_list *bind_list, | |
1760 | const struct ib_cm_id *cm_id, | |
1761 | const struct ib_cm_event *ib_event, | |
1762 | const struct cma_req_info *req, | |
1763 | const struct net_device *net_dev) | |
1764 | { | |
1765 | struct rdma_id_private *id_priv, *id_priv_dev; | |
1766 | ||
730c8912 MZ |
1767 | lockdep_assert_held(&lock); |
1768 | ||
4c21b5bc HE |
1769 | if (!bind_list) |
1770 | return ERR_PTR(-EINVAL); | |
1771 | ||
1772 | hlist_for_each_entry(id_priv, &bind_list->owners, node) { | |
1773 | if (cma_match_private_data(id_priv, ib_event->private_data)) { | |
1774 | if (id_priv->id.device == cm_id->device && | |
78fb282b | 1775 | cma_match_net_dev(&id_priv->id, net_dev, req)) |
4c21b5bc HE |
1776 | return id_priv; |
1777 | list_for_each_entry(id_priv_dev, | |
1778 | &id_priv->listen_list, | |
99cfddb8 | 1779 | listen_item) { |
4c21b5bc | 1780 | if (id_priv_dev->id.device == cm_id->device && |
78fb282b PP |
1781 | cma_match_net_dev(&id_priv_dev->id, |
1782 | net_dev, req)) | |
4c21b5bc HE |
1783 | return id_priv_dev; |
1784 | } | |
1785 | } | |
1786 | } | |
1787 | ||
1788 | return ERR_PTR(-EINVAL); | |
1789 | } | |
1790 | ||
e7ff98ae | 1791 | static struct rdma_id_private * |
85463316 PP |
1792 | cma_ib_id_from_event(struct ib_cm_id *cm_id, |
1793 | const struct ib_cm_event *ib_event, | |
41ab1cb7 | 1794 | struct cma_req_info *req, |
85463316 | 1795 | struct net_device **net_dev) |
4c21b5bc | 1796 | { |
4c21b5bc HE |
1797 | struct rdma_bind_list *bind_list; |
1798 | struct rdma_id_private *id_priv; | |
4c21b5bc HE |
1799 | int err; |
1800 | ||
41ab1cb7 | 1801 | err = cma_save_req_info(ib_event, req); |
4c21b5bc HE |
1802 | if (err) |
1803 | return ERR_PTR(err); | |
1804 | ||
41ab1cb7 | 1805 | *net_dev = cma_get_net_dev(ib_event, req); |
0b3ca768 HE |
1806 | if (IS_ERR(*net_dev)) { |
1807 | if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) { | |
4c21b5bc | 1808 | /* Assuming the protocol is AF_IB */ |
0b3ca768 | 1809 | *net_dev = NULL; |
4c21b5bc | 1810 | } else { |
0b3ca768 | 1811 | return ERR_CAST(*net_dev); |
4c21b5bc HE |
1812 | } |
1813 | } | |
1814 | ||
730c8912 | 1815 | mutex_lock(&lock); |
2918c1a9 PP |
1816 | /* |
1817 | * The net namespace might be getting deleted while the route and
1818 | * cm_id lookups are in progress. Therefore, perform the netdevice
1819 | * validation and the cm_id lookup under the rcu lock.
1820 | * The rcu lock, along with the netdevice state check, synchronizes
1821 | * with a netdevice migrating to a different net namespace and also
1822 | * ensures that the net namespace is not deleted while the lookup is
1823 | * in progress.
1824 | * If the device state is not IFF_UP, its properties such as ifindex
1825 | * and nd_net cannot be trusted to remain valid without the rcu lock.
1826 | * net/core/dev.c change_net_namespace() synchronizes with ongoing
1827 | * operations on a net device after the device is closed, using
1828 | * synchronize_net().
1829 | */ | |
1830 | rcu_read_lock(); | |
1831 | if (*net_dev) { | |
1832 | /* | |
1833 | * If the netdevice is down, it is likely administratively
1834 | * down or migrating to a different namespace.
1835 | * In that case avoid further processing, as the net namespace | |
1836 | * or ifindex may change. | |
1837 | */ | |
1838 | if (((*net_dev)->flags & IFF_UP) == 0) { | |
1839 | id_priv = ERR_PTR(-EHOSTUNREACH); | |
1840 | goto err; | |
1841 | } | |
1842 | ||
1843 | if (!validate_net_dev(*net_dev, | |
27cfde79 MG |
1844 | (struct sockaddr *)&req->src_addr_storage, |
1845 | (struct sockaddr *)&req->listen_addr_storage)) { | |
2918c1a9 PP |
1846 | id_priv = ERR_PTR(-EHOSTUNREACH); |
1847 | goto err; | |
1848 | } | |
1849 | } | |
1850 | ||
fa20105e | 1851 | bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net, |
41ab1cb7 PP |
1852 | rdma_ps_from_service_id(req->service_id), |
1853 | cma_port_from_service_id(req->service_id)); | |
1854 | id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev); | |
2918c1a9 PP |
1855 | err: |
1856 | rcu_read_unlock(); | |
730c8912 | 1857 | mutex_unlock(&lock); |
b3b51f9f | 1858 | if (IS_ERR(id_priv) && *net_dev) { |
be688195 HE |
1859 | dev_put(*net_dev); |
1860 | *net_dev = NULL; | |
1861 | } | |
4c21b5bc HE |
1862 | return id_priv; |
1863 | } | |
1864 | ||
c0b64f58 | 1865 | static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv) |
e51060f0 | 1866 | { |
e8160e15 | 1867 | return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr); |
e51060f0 SH |
1868 | } |
1869 | ||
e51060f0 SH |
1870 | static void cma_cancel_route(struct rdma_id_private *id_priv) |
1871 | { | |
fe53ba2f | 1872 | if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) { |
e51060f0 SH |
1873 | if (id_priv->query) |
1874 | ib_sa_cancel_query(id_priv->query_id, id_priv->query); | |
e51060f0 SH |
1875 | } |
1876 | } | |
1877 | ||
ca465e1f | 1878 | static void _cma_cancel_listens(struct rdma_id_private *id_priv) |
e51060f0 SH |
1879 | { |
1880 | struct rdma_id_private *dev_id_priv; | |
1881 | ||
ca465e1f TL |
1882 | lockdep_assert_held(&lock); |
1883 | ||
d02d1f53 SH |
1884 | /* |
1885 | * Remove from listen_any_list to prevent added devices from spawning | |
1886 | * additional listen requests. | |
1887 | */ | |
99cfddb8 | 1888 | list_del_init(&id_priv->listen_any_item); |
e51060f0 SH |
1889 | |
1890 | while (!list_empty(&id_priv->listen_list)) { | |
99cfddb8 JG |
1891 | dev_id_priv = |
1892 | list_first_entry(&id_priv->listen_list, | |
1893 | struct rdma_id_private, listen_item); | |
d02d1f53 | 1894 | /* sync with device removal to avoid duplicate destruction */ |
99cfddb8 JG |
1895 | list_del_init(&dev_id_priv->device_item); |
1896 | list_del_init(&dev_id_priv->listen_item); | |
d02d1f53 SH |
1897 | mutex_unlock(&lock); |
1898 | ||
1899 | rdma_destroy_id(&dev_id_priv->id); | |
1900 | mutex_lock(&lock); | |
e51060f0 | 1901 | } |
ca465e1f TL |
1902 | } |
1903 | ||
1904 | static void cma_cancel_listens(struct rdma_id_private *id_priv) | |
1905 | { | |
1906 | mutex_lock(&lock); | |
1907 | _cma_cancel_listens(id_priv); | |
e51060f0 SH |
1908 | mutex_unlock(&lock); |
1909 | } | |
1910 | ||
1911 | static void cma_cancel_operation(struct rdma_id_private *id_priv, | |
550e5ca7 | 1912 | enum rdma_cm_state state) |
e51060f0 SH |
1913 | { |
1914 | switch (state) { | |
550e5ca7 | 1915 | case RDMA_CM_ADDR_QUERY: |
305d568b JG |
1916 | /* |
1917 | * We can avoid doing the rdma_addr_cancel() based on state;
1918 | * only RDMA_CM_ADDR_QUERY has a work item that could still execute.
1919 | * Notice that the addr_handler work could still be exiting
1920 | * outside this state; however, due to the interaction with the
1921 | * handler_mutex the work is guaranteed not to touch id_priv | |
1922 | * during exit. | |
1923 | */ | |
e51060f0 SH |
1924 | rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); |
1925 | break; | |
550e5ca7 | 1926 | case RDMA_CM_ROUTE_QUERY: |
e51060f0 SH |
1927 | cma_cancel_route(id_priv); |
1928 | break; | |
550e5ca7 | 1929 | case RDMA_CM_LISTEN: |
f4753834 | 1930 | if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev) |
e51060f0 SH |
1931 | cma_cancel_listens(id_priv); |
1932 | break; | |
1933 | default: | |
1934 | break; | |
1935 | } | |
1936 | } | |
1937 | ||
1938 | static void cma_release_port(struct rdma_id_private *id_priv) | |
1939 | { | |
1940 | struct rdma_bind_list *bind_list = id_priv->bind_list; | |
fa20105e | 1941 | struct net *net = id_priv->id.route.addr.dev_addr.net; |
e51060f0 SH |
1942 | |
1943 | if (!bind_list) | |
1944 | return; | |
1945 | ||
1946 | mutex_lock(&lock); | |
1947 | hlist_del(&id_priv->node); | |
1948 | if (hlist_empty(&bind_list->owners)) { | |
fa20105e | 1949 | cma_ps_remove(net, bind_list->ps, bind_list->port); |
e51060f0 SH |
1950 | kfree(bind_list); |
1951 | } | |
1952 | mutex_unlock(&lock); | |
1953 | } | |
1954 | ||
3788d299 JG |
1955 | static void destroy_mc(struct rdma_id_private *id_priv, |
1956 | struct cma_multicast *mc) | |
88145678 | 1957 | { |
2cc74e1e CL |
1958 | bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); |
1959 | ||
b5de0c60 JG |
1960 | if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num)) |
1961 | ib_sa_free_multicast(mc->sa_mc); | |
c0126915 | 1962 | |
b5de0c60 | 1963 | if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) { |
3788d299 JG |
1964 | struct rdma_dev_addr *dev_addr = |
1965 | &id_priv->id.route.addr.dev_addr; | |
1966 | struct net_device *ndev = NULL; | |
1967 | ||
1968 | if (dev_addr->bound_dev_if) | |
1969 | ndev = dev_get_by_index(dev_addr->net, | |
1970 | dev_addr->bound_dev_if); | |
d9e410eb MG |
1971 | if (ndev && !send_only) { |
1972 | enum ib_gid_type gid_type; | |
b5de0c60 JG |
1973 | union ib_gid mgid; |
1974 | ||
d9e410eb MG |
1975 | gid_type = id_priv->cma_dev->default_gid_type |
1976 | [id_priv->id.port_num - | |
1977 | rdma_start_port( | |
1978 | id_priv->cma_dev->device)]; | |
1979 | cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid, | |
1980 | gid_type); | |
1981 | cma_igmp_send(ndev, &mgid, false); | |
3788d299 | 1982 | } |
d9e410eb | 1983 | dev_put(ndev); |
fe454dc3 AH |
1984 | |
1985 | cancel_work_sync(&mc->iboe_join.work); | |
88145678 | 1986 | } |
b5de0c60 | 1987 | kfree(mc); |
88145678 PP |
1988 | } |
1989 | ||
c8f6a362 SH |
1990 | static void cma_leave_mc_groups(struct rdma_id_private *id_priv) |
1991 | { | |
1992 | struct cma_multicast *mc; | |
1993 | ||
1994 | while (!list_empty(&id_priv->mc_list)) { | |
3788d299 JG |
1995 | mc = list_first_entry(&id_priv->mc_list, struct cma_multicast, |
1996 | list); | |
c8f6a362 | 1997 | list_del(&mc->list); |
3788d299 | 1998 | destroy_mc(id_priv, mc); |
c8f6a362 SH |
1999 | } |
2000 | } | |
2001 | ||
f6a9d47a JG |
2002 | static void _destroy_id(struct rdma_id_private *id_priv, |
2003 | enum rdma_cm_state state) | |
e51060f0 | 2004 | { |
e51060f0 SH |
2005 | cma_cancel_operation(id_priv, state); |
2006 | ||
3d828754 | 2007 | rdma_restrack_del(&id_priv->res); |
fc008bdb | 2008 | cma_remove_id_from_tree(id_priv); |
e51060f0 | 2009 | if (id_priv->cma_dev) { |
72219cea | 2010 | if (rdma_cap_ib_cm(id_priv->id.device, 1)) { |
0c9361fc | 2011 | if (id_priv->cm_id.ib) |
e51060f0 | 2012 | ib_destroy_cm_id(id_priv->cm_id.ib); |
04215330 | 2013 | } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) { |
0c9361fc | 2014 | if (id_priv->cm_id.iw) |
07ebafba | 2015 | iw_destroy_cm_id(id_priv->cm_id.iw); |
e51060f0 | 2016 | } |
c8f6a362 | 2017 | cma_leave_mc_groups(id_priv); |
a396d43a | 2018 | cma_release_dev(id_priv); |
e51060f0 SH |
2019 | } |
2020 | ||
2021 | cma_release_port(id_priv); | |
e368d23f | 2022 | cma_id_put(id_priv); |
e51060f0 SH |
2023 | wait_for_completion(&id_priv->comp); |
2024 | ||
d02d1f53 | 2025 | if (id_priv->internal_id) |
e368d23f | 2026 | cma_id_put(id_priv->id.context); |
d02d1f53 | 2027 | |
e51060f0 | 2028 | kfree(id_priv->id.route.path_rec); |
5a374949 MZ |
2029 | kfree(id_priv->id.route.path_rec_inbound); |
2030 | kfree(id_priv->id.route.path_rec_outbound); | |
4ed13a5f | 2031 | |
fa20105e | 2032 | put_net(id_priv->id.route.addr.dev_addr.net); |
e51060f0 SH |
2033 | kfree(id_priv); |
2034 | } | |
f6a9d47a JG |
2035 | |
2036 | /* | |
2037 | * destroy an ID from within the handler_mutex. This ensures that no other | |
2038 | * handlers can start running concurrently. | |
2039 | */ | |
2040 | static void destroy_id_handler_unlock(struct rdma_id_private *id_priv) | |
2041 | __releases(&id_priv->handler_mutex)
2042 | { | |
2043 | enum rdma_cm_state state; | |
2044 | unsigned long flags; | |
2045 | ||
2046 | trace_cm_id_destroy(id_priv); | |
2047 | ||
2048 | /* | |
2049 | * Setting the state to destroyed under the handler mutex provides a | |
2050 | * fence against calling handler callbacks. If this is invoked due to | |
2051 | * the failure of a handler callback then it guarantees that no future
2052 | * handlers will be called. | |
2053 | */ | |
2054 | lockdep_assert_held(&id_priv->handler_mutex); | |
2055 | spin_lock_irqsave(&id_priv->lock, flags); | |
2056 | state = id_priv->state; | |
2057 | id_priv->state = RDMA_CM_DESTROYING; | |
2058 | spin_unlock_irqrestore(&id_priv->lock, flags); | |
2059 | mutex_unlock(&id_priv->handler_mutex); | |
2060 | _destroy_id(id_priv, state); | |
2061 | } | |
2062 | ||
2063 | void rdma_destroy_id(struct rdma_cm_id *id) | |
2064 | { | |
2065 | struct rdma_id_private *id_priv = | |
2066 | container_of(id, struct rdma_id_private, id); | |
2067 | ||
2068 | mutex_lock(&id_priv->handler_mutex); | |
2069 | destroy_id_handler_unlock(id_priv); | |
2070 | } | |
e51060f0 SH |
2071 | EXPORT_SYMBOL(rdma_destroy_id); |
2072 | ||
2073 | static int cma_rep_recv(struct rdma_id_private *id_priv) | |
2074 | { | |
2075 | int ret; | |
2076 | ||
5851bb89 | 2077 | ret = cma_modify_qp_rtr(id_priv, NULL); |
e51060f0 SH |
2078 | if (ret) |
2079 | goto reject; | |
2080 | ||
5851bb89 | 2081 | ret = cma_modify_qp_rts(id_priv, NULL); |
e51060f0 SH |
2082 | if (ret) |
2083 | goto reject; | |
2084 | ||
ed999f82 | 2085 | trace_cm_send_rtu(id_priv); |
e51060f0 SH |
2086 | ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0); |
2087 | if (ret) | |
2088 | goto reject; | |
2089 | ||
2090 | return 0; | |
2091 | reject: | |
498683c6 | 2092 | pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret); |
c5483388 | 2093 | cma_modify_qp_err(id_priv); |
ed999f82 | 2094 | trace_cm_send_rej(id_priv); |
e51060f0 SH |
2095 | ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, |
2096 | NULL, 0, NULL, 0); | |
2097 | return ret; | |
2098 | } | |
2099 | ||
a1b1b61f | 2100 | static void cma_set_rep_event_data(struct rdma_cm_event *event, |
e7ff98ae | 2101 | const struct ib_cm_rep_event_param *rep_data, |
a1b1b61f SH |
2102 | void *private_data) |
2103 | { | |
2104 | event->param.conn.private_data = private_data; | |
2105 | event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE; | |
2106 | event->param.conn.responder_resources = rep_data->responder_resources; | |
2107 | event->param.conn.initiator_depth = rep_data->initiator_depth; | |
2108 | event->param.conn.flow_control = rep_data->flow_control; | |
2109 | event->param.conn.rnr_retry_count = rep_data->rnr_retry_count; | |
2110 | event->param.conn.srq = rep_data->srq; | |
2111 | event->param.conn.qp_num = rep_data->remote_qpn; | |
a20652e1 LR |
2112 | |
2113 | event->ece.vendor_id = rep_data->ece.vendor_id; | |
2114 | event->ece.attr_mod = rep_data->ece.attr_mod; | |
a1b1b61f SH |
2115 | } |
2116 | ||
ed999f82 CL |
2117 | static int cma_cm_event_handler(struct rdma_id_private *id_priv, |
2118 | struct rdma_cm_event *event) | |
2119 | { | |
2120 | int ret; | |
2121 | ||
3647a28d JG |
2122 | lockdep_assert_held(&id_priv->handler_mutex); |
2123 | ||
ed999f82 CL |
2124 | trace_cm_event_handler(id_priv, event); |
2125 | ret = id_priv->id.event_handler(&id_priv->id, event); | |
2126 | trace_cm_event_done(id_priv, event, ret); | |
2127 | return ret; | |
2128 | } | |
2129 | ||
e7ff98ae PP |
2130 | static int cma_ib_handler(struct ib_cm_id *cm_id, |
2131 | const struct ib_cm_event *ib_event) | |
e51060f0 SH |
2132 | { |
2133 | struct rdma_id_private *id_priv = cm_id->context; | |
7582df82 | 2134 | struct rdma_cm_event event = {}; |
2a7cec53 | 2135 | enum rdma_cm_state state; |
f6a9d47a | 2136 | int ret; |
e51060f0 | 2137 | |
37e07cda | 2138 | mutex_lock(&id_priv->handler_mutex); |
2a7cec53 | 2139 | state = READ_ONCE(id_priv->state); |
38ca83a5 | 2140 | if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && |
2a7cec53 | 2141 | state != RDMA_CM_CONNECT) || |
38ca83a5 | 2142 | (ib_event->event == IB_CM_TIMEWAIT_EXIT && |
2a7cec53 | 2143 | state != RDMA_CM_DISCONNECT)) |
37e07cda | 2144 | goto out; |
e51060f0 SH |
2145 | |
2146 | switch (ib_event->event) { | |
2147 | case IB_CM_REQ_ERROR: | |
2148 | case IB_CM_REP_ERROR: | |
a1b1b61f SH |
2149 | event.event = RDMA_CM_EVENT_UNREACHABLE; |
2150 | event.status = -ETIMEDOUT; | |
e51060f0 SH |
2151 | break; |
2152 | case IB_CM_REP_RECEIVED: | |
2a7cec53 | 2153 | if (state == RDMA_CM_CONNECT && |
ed999f82 CL |
2154 | (id_priv->id.qp_type != IB_QPT_UD)) { |
2155 | trace_cm_send_mra(id_priv); | |
61c0ddbe | 2156 | ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); |
ed999f82 | 2157 | } |
01602f11 | 2158 | if (id_priv->id.qp) { |
a1b1b61f SH |
2159 | event.status = cma_rep_recv(id_priv); |
2160 | event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR : | |
2161 | RDMA_CM_EVENT_ESTABLISHED; | |
01602f11 | 2162 | } else { |
a1b1b61f | 2163 | event.event = RDMA_CM_EVENT_CONNECT_RESPONSE; |
01602f11 | 2164 | } |
a1b1b61f SH |
2165 | cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd, |
2166 | ib_event->private_data); | |
e51060f0 SH |
2167 | break; |
2168 | case IB_CM_RTU_RECEIVED: | |
0fe313b0 SH |
2169 | case IB_CM_USER_ESTABLISHED: |
2170 | event.event = RDMA_CM_EVENT_ESTABLISHED; | |
e51060f0 SH |
2171 | break; |
2172 | case IB_CM_DREQ_ERROR: | |
df561f66 GS |
2173 | event.status = -ETIMEDOUT; |
2174 | fallthrough; | |
e51060f0 SH |
2175 | case IB_CM_DREQ_RECEIVED: |
2176 | case IB_CM_DREP_RECEIVED: | |
550e5ca7 NM |
2177 | if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, |
2178 | RDMA_CM_DISCONNECT)) | |
e51060f0 | 2179 | goto out; |
a1b1b61f | 2180 | event.event = RDMA_CM_EVENT_DISCONNECTED; |
e51060f0 SH |
2181 | break; |
2182 | case IB_CM_TIMEWAIT_EXIT: | |
38ca83a5 AV |
2183 | event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT; |
2184 | break; | |
e51060f0 SH |
2185 | case IB_CM_MRA_RECEIVED: |
2186 | /* ignore event */ | |
2187 | goto out; | |
2188 | case IB_CM_REJ_RECEIVED: | |
498683c6 MS |
2189 | pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id, |
2190 | ib_event->param.rej_rcvd.reason)); | |
c5483388 | 2191 | cma_modify_qp_err(id_priv); |
a1b1b61f SH |
2192 | event.status = ib_event->param.rej_rcvd.reason; |
2193 | event.event = RDMA_CM_EVENT_REJECTED; | |
2194 | event.param.conn.private_data = ib_event->private_data; | |
2195 | event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; | |
e51060f0 SH |
2196 | break; |
2197 | default: | |
aba25a3e | 2198 | pr_err("RDMA CMA: unexpected IB CM event: %d\n", |
e51060f0 SH |
2199 | ib_event->event); |
2200 | goto out; | |
2201 | } | |
2202 | ||
ed999f82 | 2203 | ret = cma_cm_event_handler(id_priv, &event); |
e51060f0 SH |
2204 | if (ret) { |
2205 | /* Destroy the CM ID by returning a non-zero value. */ | |
2206 | id_priv->cm_id.ib = NULL; | |
f6a9d47a | 2207 | destroy_id_handler_unlock(id_priv); |
e51060f0 SH |
2208 | return ret; |
2209 | } | |
2210 | out: | |
de910bd9 | 2211 | mutex_unlock(&id_priv->handler_mutex); |
f6a9d47a | 2212 | return 0; |
e51060f0 SH |
2213 | } |
2214 | ||
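As cma_ib_handler() shows, a consumer never calls rdma_destroy_id() from handler context (that would deadlock on handler_mutex); instead a non-zero return from the event handler makes the CM tear the ID down via destroy_id_handler_unlock(). A hedged consumer-side sketch (the handler name and error choices are hypothetical):

/* Hypothetical ULP handler: returning non-zero asks the RDMA CM
 * to destroy the rdma_cm_id on the consumer's behalf; the handler
 * itself must not call rdma_destroy_id().
 */
static int my_cm_handler(struct rdma_cm_id *id,
			 struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_REJECTED:
	case RDMA_CM_EVENT_UNREACHABLE:
		return -ECONNRESET;	/* non-zero: CM destroys the id */
	default:
		return 0;		/* keep the id alive */
	}
}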
e7ff98ae | 2215 | static struct rdma_id_private * |
85463316 PP |
2216 | cma_ib_new_conn_id(const struct rdma_cm_id *listen_id, |
2217 | const struct ib_cm_event *ib_event, | |
2218 | struct net_device *net_dev) | |
e51060f0 | 2219 | { |
00313983 | 2220 | struct rdma_id_private *listen_id_priv; |
e51060f0 SH |
2221 | struct rdma_id_private *id_priv; |
2222 | struct rdma_cm_id *id; | |
2223 | struct rdma_route *rt; | |
0c505f70 | 2224 | const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; |
9fdca4da | 2225 | struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path; |
d3957b86 MD |
2226 | const __be64 service_id = |
2227 | ib_event->param.req_rcvd.primary_path->service_id; | |
64c5e613 | 2228 | int ret; |
e51060f0 | 2229 | |
00313983 | 2230 | listen_id_priv = container_of(listen_id, struct rdma_id_private, id); |
b09c4d70 LR |
2231 | id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net, |
2232 | listen_id->event_handler, listen_id->context, | |
2233 | listen_id->ps, | |
2234 | ib_event->param.req_rcvd.qp_type, | |
2235 | listen_id_priv); | |
2236 | if (IS_ERR(id_priv)) | |
0c9361fc | 2237 | return NULL; |
3f168d2b | 2238 | |
b09c4d70 | 2239 | id = &id_priv->id; |
0c505f70 HE |
2240 | if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, |
2241 | (struct sockaddr *)&id->route.addr.dst_addr, | |
2242 | listen_id, ib_event, ss_family, service_id)) | |
fbaa1a6d | 2243 | goto err; |
e51060f0 SH |
2244 | |
2245 | rt = &id->route; | |
bf9a9928 MZ |
2246 | rt->num_pri_alt_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1; |
2247 | rt->path_rec = kmalloc_array(rt->num_pri_alt_paths, | |
2248 | sizeof(*rt->path_rec), GFP_KERNEL); | |
e51060f0 | 2249 | if (!rt->path_rec) |
0c9361fc | 2250 | goto err; |
e51060f0 | 2251 | |
9fdca4da | 2252 | rt->path_rec[0] = *path; |
bf9a9928 | 2253 | if (rt->num_pri_alt_paths == 2) |
e51060f0 SH |
2254 | rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; |
2255 | ||
0b3ca768 | 2256 | if (net_dev) { |
77addc52 | 2257 | rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev); |
0b3ca768 | 2258 | } else { |
b8cab5da HE |
2259 | if (!cma_protocol_roce(listen_id) && |
2260 | cma_any_addr(cma_src_addr(id_priv))) { | |
2261 | rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; | |
2262 | rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); | |
2263 | ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); | |
2264 | } else if (!cma_any_addr(cma_src_addr(id_priv))) { | |
2265 | ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); | |
2266 | if (ret) | |
2267 | goto err; | |
2268 | } | |
6f8372b6 SH |
2269 | } |
2270 | rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); | |
e51060f0 | 2271 | |
550e5ca7 | 2272 | id_priv->state = RDMA_CM_CONNECT; |
e51060f0 | 2273 | return id_priv; |
3f168d2b | 2274 | |
3f168d2b | 2275 | err: |
0c9361fc | 2276 | rdma_destroy_id(id); |
e51060f0 SH |
2277 | return NULL; |
2278 | } | |
2279 | ||
e7ff98ae | 2280 | static struct rdma_id_private * |
85463316 PP |
2281 | cma_ib_new_udp_id(const struct rdma_cm_id *listen_id, |
2282 | const struct ib_cm_event *ib_event, | |
2283 | struct net_device *net_dev) | |
628e5f6d | 2284 | { |
e7ff98ae | 2285 | const struct rdma_id_private *listen_id_priv; |
628e5f6d SH |
2286 | struct rdma_id_private *id_priv; |
2287 | struct rdma_cm_id *id; | |
0c505f70 | 2288 | const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; |
fa20105e | 2289 | struct net *net = listen_id->route.addr.dev_addr.net; |
628e5f6d SH |
2290 | int ret; |
2291 | ||
00313983 | 2292 | listen_id_priv = container_of(listen_id, struct rdma_id_private, id); |
b09c4d70 LR |
2293 | id_priv = __rdma_create_id(net, listen_id->event_handler, |
2294 | listen_id->context, listen_id->ps, IB_QPT_UD, | |
2295 | listen_id_priv); | |
2296 | if (IS_ERR(id_priv)) | |
628e5f6d SH |
2297 | return NULL; |
2298 | ||
b09c4d70 | 2299 | id = &id_priv->id; |
0c505f70 HE |
2300 | if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, |
2301 | (struct sockaddr *)&id->route.addr.dst_addr, | |
2302 | listen_id, ib_event, ss_family, | |
2303 | ib_event->param.sidr_req_rcvd.service_id)) | |
628e5f6d SH |
2304 | goto err; |
2305 | ||
0b3ca768 | 2306 | if (net_dev) { |
77addc52 | 2307 | rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev); |
0b3ca768 | 2308 | } else { |
b8cab5da HE |
2309 | if (!cma_any_addr(cma_src_addr(id_priv))) { |
2310 | ret = cma_translate_addr(cma_src_addr(id_priv), | |
2311 | &id->route.addr.dev_addr); | |
2312 | if (ret) | |
2313 | goto err; | |
2314 | } | |
6f8372b6 | 2315 | } |
628e5f6d | 2316 | |
550e5ca7 | 2317 | id_priv->state = RDMA_CM_CONNECT; |
628e5f6d SH |
2318 | return id_priv; |
2319 | err: | |
2320 | rdma_destroy_id(id); | |
2321 | return NULL; | |
2322 | } | |
2323 | ||
a1b1b61f | 2324 | static void cma_set_req_event_data(struct rdma_cm_event *event, |
e7ff98ae | 2325 | const struct ib_cm_req_event_param *req_data, |
a1b1b61f SH |
2326 | void *private_data, int offset) |
2327 | { | |
2328 | event->param.conn.private_data = private_data + offset; | |
2329 | event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset; | |
2330 | event->param.conn.responder_resources = req_data->responder_resources; | |
2331 | event->param.conn.initiator_depth = req_data->initiator_depth; | |
2332 | event->param.conn.flow_control = req_data->flow_control; | |
2333 | event->param.conn.retry_count = req_data->retry_count; | |
2334 | event->param.conn.rnr_retry_count = req_data->rnr_retry_count; | |
2335 | event->param.conn.srq = req_data->srq; | |
2336 | event->param.conn.qp_num = req_data->remote_qpn; | |
a20652e1 LR |
2337 | |
2338 | event->ece.vendor_id = req_data->ece.vendor_id; | |
2339 | event->ece.attr_mod = req_data->ece.attr_mod; | |
a1b1b61f SH |
2340 | } |
2341 | ||
85463316 PP |
2342 | static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id, |
2343 | const struct ib_cm_event *ib_event) | |
9595480c | 2344 | { |
4dd81e89 | 2345 | return (((ib_event->event == IB_CM_REQ_RECEIVED) && |
9595480c HS |
2346 | (ib_event->param.req_rcvd.qp_type == id->qp_type)) || |
2347 | ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) && | |
2348 | (id->qp_type == IB_QPT_UD)) || | |
2349 | (!id->qp_type)); | |
2350 | } | |
2351 | ||
85463316 PP |
2352 | static int cma_ib_req_handler(struct ib_cm_id *cm_id, |
2353 | const struct ib_cm_event *ib_event) | |
e51060f0 | 2354 | { |
37e07cda | 2355 | struct rdma_id_private *listen_id, *conn_id = NULL; |
7582df82 | 2356 | struct rdma_cm_event event = {}; |
41ab1cb7 | 2357 | struct cma_req_info req = {}; |
0b3ca768 | 2358 | struct net_device *net_dev; |
c0b64f58 BVA |
2359 | u8 offset; |
2360 | int ret; | |
e51060f0 | 2361 | |
41ab1cb7 | 2362 | listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev); |
4c21b5bc HE |
2363 | if (IS_ERR(listen_id)) |
2364 | return PTR_ERR(listen_id); | |
2365 | ||
ed999f82 | 2366 | trace_cm_req_handler(listen_id, ib_event->event); |
85463316 | 2367 | if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) { |
0b3ca768 HE |
2368 | ret = -EINVAL; |
2369 | goto net_dev_put; | |
2370 | } | |
9595480c | 2371 | |
37e07cda | 2372 | mutex_lock(&listen_id->handler_mutex); |
d490ee52 | 2373 | if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) { |
0b3ca768 | 2374 | ret = -ECONNABORTED; |
f6a9d47a | 2375 | goto err_unlock; |
0b3ca768 | 2376 | } |
e51060f0 | 2377 | |
e8160e15 | 2378 | offset = cma_user_data_offset(listen_id); |
628e5f6d | 2379 | event.event = RDMA_CM_EVENT_CONNECT_REQUEST; |
9595480c | 2380 | if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) { |
85463316 | 2381 | conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, net_dev); |
628e5f6d SH |
2382 | event.param.ud.private_data = ib_event->private_data + offset; |
2383 | event.param.ud.private_data_len = | |
2384 | IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset; | |
2385 | } else { | |
85463316 | 2386 | conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev); |
628e5f6d SH |
2387 | cma_set_req_event_data(&event, &ib_event->param.req_rcvd, |
2388 | ib_event->private_data, offset); | |
2389 | } | |
e51060f0 SH |
2390 | if (!conn_id) { |
2391 | ret = -ENOMEM; | |
f6a9d47a | 2392 | goto err_unlock; |
e51060f0 SH |
2393 | } |
2394 | ||
de910bd9 | 2395 | mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); |
41ab1cb7 | 2396 | ret = cma_ib_acquire_dev(conn_id, listen_id, &req); |
f6a9d47a JG |
2397 | if (ret) { |
2398 | destroy_id_handler_unlock(conn_id); | |
2399 | goto err_unlock; | |
2400 | } | |
e51060f0 SH |
2401 | |
2402 | conn_id->cm_id.ib = cm_id; | |
2403 | cm_id->context = conn_id; | |
2404 | cm_id->cm_handler = cma_ib_handler; | |
2405 | ||
ed999f82 | 2406 | ret = cma_cm_event_handler(conn_id, &event); |
f6a9d47a JG |
2407 | if (ret) { |
2408 | /* Destroy the CM ID by returning a non-zero value. */ | |
2409 | conn_id->cm_id.ib = NULL; | |
2410 | mutex_unlock(&listen_id->handler_mutex); | |
2411 | destroy_id_handler_unlock(conn_id); | |
2412 | goto net_dev_put; | |
2413 | } | |
2414 | ||
2a7cec53 JG |
2415 | if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT && |
2416 | conn_id->id.qp_type != IB_QPT_UD) { | |
ed999f82 | 2417 | trace_cm_send_mra(cm_id->context); |
b6cec8aa | 2418 | ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); |
ed999f82 | 2419 | } |
b6cec8aa | 2420 | mutex_unlock(&conn_id->handler_mutex); |
a1a733f6 | 2421 | |
f6a9d47a | 2422 | err_unlock: |
de910bd9 | 2423 | mutex_unlock(&listen_id->handler_mutex); |
0b3ca768 HE |
2424 | |
2425 | net_dev_put: | |
2426 | if (net_dev) | |
2427 | dev_put(net_dev); | |
2428 | ||
e51060f0 SH |
2429 | return ret; |
2430 | } | |
2431 | ||
cf53936f | 2432 | __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr) |
e51060f0 | 2433 | { |
496ce3ce SH |
2434 | if (addr->sa_family == AF_IB) |
2435 | return ((struct sockaddr_ib *) addr)->sib_sid; | |
2436 | ||
cf53936f | 2437 | return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr))); |
e51060f0 | 2438 | } |
cf53936f | 2439 | EXPORT_SYMBOL(rdma_get_service_id); |
e51060f0 | 2440 | |
411460ac PP |
2441 | void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid, |
2442 | union ib_gid *dgid) | |
2443 | { | |
2444 | struct rdma_addr *addr = &cm_id->route.addr; | |
2445 | ||
2446 | if (!cm_id->device) { | |
2447 | if (sgid) | |
2448 | memset(sgid, 0, sizeof(*sgid)); | |
2449 | if (dgid) | |
2450 | memset(dgid, 0, sizeof(*dgid)); | |
2451 | return; | |
2452 | } | |
2453 | ||
2454 | if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) { | |
2455 | if (sgid) | |
2456 | rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid); | |
2457 | if (dgid) | |
2458 | rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid); | |
2459 | } else { | |
2460 | if (sgid) | |
2461 | rdma_addr_get_sgid(&addr->dev_addr, sgid); | |
2462 | if (dgid) | |
2463 | rdma_addr_get_dgid(&addr->dev_addr, dgid); | |
2464 | } | |
2465 | } | |
2466 | EXPORT_SYMBOL(rdma_read_gids); | |
2467 | ||
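rdma_read_gids() gives consumers a transport-neutral view of the connection's GIDs: for RoCE they are derived from the IP addresses via rdma_ip2gid(), otherwise they come from the resolved device address. A small usage sketch (pure illustration, not part of this file):

/* Sketch: log both GIDs of a cm_id using the %pI6 printk format. */
static void dump_gids(struct rdma_cm_id *id)
{
	union ib_gid sgid, dgid;

	rdma_read_gids(id, &sgid, &dgid);
	pr_info("sgid %pI6 dgid %pI6\n", sgid.raw, dgid.raw);
}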
07ebafba TT |
2468 | static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) |
2469 | { | |
2470 | struct rdma_id_private *id_priv = iw_id->context; | |
7582df82 | 2471 | struct rdma_cm_event event = {}; |
07ebafba | 2472 | int ret = 0; |
24d44a39 SW |
2473 | struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; |
2474 | struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; | |
07ebafba | 2475 | |
37e07cda | 2476 | mutex_lock(&id_priv->handler_mutex); |
2a7cec53 | 2477 | if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) |
37e07cda | 2478 | goto out; |
07ebafba TT |
2479 | |
2480 | switch (iw_event->event) { | |
2481 | case IW_CM_EVENT_CLOSE: | |
a1b1b61f | 2482 | event.event = RDMA_CM_EVENT_DISCONNECTED; |
07ebafba TT |
2483 | break; |
2484 | case IW_CM_EVENT_CONNECT_REPLY: | |
24d44a39 SW |
2485 | memcpy(cma_src_addr(id_priv), laddr, |
2486 | rdma_addr_size(laddr)); | |
2487 | memcpy(cma_dst_addr(id_priv), raddr, | |
2488 | rdma_addr_size(raddr)); | |
881a045f SW |
2489 | switch (iw_event->status) { |
2490 | case 0: | |
a1b1b61f | 2491 | event.event = RDMA_CM_EVENT_ESTABLISHED; |
3ebeebc3 KS |
2492 | event.param.conn.initiator_depth = iw_event->ird; |
2493 | event.param.conn.responder_resources = iw_event->ord; | |
881a045f SW |
2494 | break; |
2495 | case -ECONNRESET: | |
2496 | case -ECONNREFUSED: | |
2497 | event.event = RDMA_CM_EVENT_REJECTED; | |
2498 | break; | |
2499 | case -ETIMEDOUT: | |
2500 | event.event = RDMA_CM_EVENT_UNREACHABLE; | |
2501 | break; | |
2502 | default: | |
2503 | event.event = RDMA_CM_EVENT_CONNECT_ERROR; | |
2504 | break; | |
2505 | } | |
07ebafba TT |
2506 | break; |
2507 | case IW_CM_EVENT_ESTABLISHED: | |
a1b1b61f | 2508 | event.event = RDMA_CM_EVENT_ESTABLISHED; |
3ebeebc3 KS |
2509 | event.param.conn.initiator_depth = iw_event->ird; |
2510 | event.param.conn.responder_resources = iw_event->ord; | |
07ebafba TT |
2511 | break; |
2512 | default: | |
671a6cc2 | 2513 | goto out; |
07ebafba TT |
2514 | } |
2515 | ||
a1b1b61f SH |
2516 | event.status = iw_event->status; |
2517 | event.param.conn.private_data = iw_event->private_data; | |
2518 | event.param.conn.private_data_len = iw_event->private_data_len; | |
ed999f82 | 2519 | ret = cma_cm_event_handler(id_priv, &event); |
07ebafba TT |
2520 | if (ret) { |
2521 | /* Destroy the CM ID by returning a non-zero value. */ | |
2522 | id_priv->cm_id.iw = NULL; | |
f6a9d47a | 2523 | destroy_id_handler_unlock(id_priv); |
07ebafba TT |
2524 | return ret; |
2525 | } | |
2526 | ||
37e07cda | 2527 | out: |
de910bd9 | 2528 | mutex_unlock(&id_priv->handler_mutex); |
07ebafba TT |
2529 | return ret; |
2530 | } | |
2531 | ||
2532 | static int iw_conn_req_handler(struct iw_cm_id *cm_id, | |
2533 | struct iw_cm_event *iw_event) | |
2534 | { | |
07ebafba | 2535 | struct rdma_id_private *listen_id, *conn_id; |
7582df82 | 2536 | struct rdma_cm_event event = {}; |
37e07cda | 2537 | int ret = -ECONNABORTED; |
24d44a39 SW |
2538 | struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; |
2539 | struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; | |
07ebafba | 2540 | |
7582df82 PP |
2541 | event.event = RDMA_CM_EVENT_CONNECT_REQUEST; |
2542 | event.param.conn.private_data = iw_event->private_data; | |
2543 | event.param.conn.private_data_len = iw_event->private_data_len; | |
2544 | event.param.conn.initiator_depth = iw_event->ird; | |
2545 | event.param.conn.responder_resources = iw_event->ord; | |
2546 | ||
07ebafba | 2547 | listen_id = cm_id->context; |
37e07cda BVA |
2548 | |
2549 | mutex_lock(&listen_id->handler_mutex); | |
d490ee52 | 2550 | if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) |
37e07cda | 2551 | goto out; |
07ebafba TT |
2552 | |
2553 | /* Create a new RDMA id for the new IW CM ID */ | |
b09c4d70 LR |
2554 | conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net, |
2555 | listen_id->id.event_handler, | |
2556 | listen_id->id.context, RDMA_PS_TCP, | |
2557 | IB_QPT_RC, listen_id); | |
2558 | if (IS_ERR(conn_id)) { | |
07ebafba TT |
2559 | ret = -ENOMEM; |
2560 | goto out; | |
2561 | } | |
de910bd9 | 2562 | mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); |
550e5ca7 | 2563 | conn_id->state = RDMA_CM_CONNECT; |
07ebafba | 2564 | |
575c7e58 | 2565 | ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr); |
07ebafba | 2566 | if (ret) { |
f6a9d47a JG |
2567 | mutex_unlock(&listen_id->handler_mutex); |
2568 | destroy_id_handler_unlock(conn_id); | |
2569 | return ret; | |
07ebafba TT |
2570 | } |
2571 | ||
41ab1cb7 | 2572 | ret = cma_iw_acquire_dev(conn_id, listen_id); |
07ebafba | 2573 | if (ret) { |
f6a9d47a JG |
2574 | mutex_unlock(&listen_id->handler_mutex); |
2575 | destroy_id_handler_unlock(conn_id); | |
2576 | return ret; | |
07ebafba TT |
2577 | } |
2578 | ||
2579 | conn_id->cm_id.iw = cm_id; | |
2580 | cm_id->context = conn_id; | |
2581 | cm_id->cm_handler = cma_iw_handler; | |
2582 | ||
24d44a39 SW |
2583 | memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr)); |
2584 | memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr)); | |
07ebafba | 2585 | |
ed999f82 | 2586 | ret = cma_cm_event_handler(conn_id, &event); |
07ebafba TT |
2587 | if (ret) { |
2588 | /* User wants to destroy the CM ID */ | |
2589 | conn_id->cm_id.iw = NULL; | |
b66f31ef | 2590 | mutex_unlock(&listen_id->handler_mutex); |
f6a9d47a | 2591 | destroy_id_handler_unlock(conn_id); |
b66f31ef | 2592 | return ret; |
07ebafba TT |
2593 | } |
2594 | ||
de910bd9 OG |
2595 | mutex_unlock(&conn_id->handler_mutex); |
2596 | ||
07ebafba | 2597 | out: |
de910bd9 | 2598 | mutex_unlock(&listen_id->handler_mutex); |
07ebafba TT |
2599 | return ret; |
2600 | } | |
2601 | ||
e51060f0 SH |
2602 | static int cma_ib_listen(struct rdma_id_private *id_priv) |
2603 | { | |
e51060f0 | 2604 | struct sockaddr *addr; |
0c9361fc | 2605 | struct ib_cm_id *id; |
e51060f0 | 2606 | __be64 svc_id; |
e51060f0 | 2607 | |
51efe394 HE |
2608 | addr = cma_src_addr(id_priv); |
2609 | svc_id = rdma_get_service_id(&id_priv->id, addr); | |
85463316 PP |
2610 | id = ib_cm_insert_listen(id_priv->id.device, |
2611 | cma_ib_req_handler, svc_id); | |
0c9361fc JM |
2612 | if (IS_ERR(id)) |
2613 | return PTR_ERR(id); | |
0c9361fc | 2614 | id_priv->cm_id.ib = id; |
e51060f0 | 2615 | |
51efe394 | 2616 | return 0; |
e51060f0 SH |
2617 | } |
2618 | ||
07ebafba TT |
2619 | static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) |
2620 | { | |
2621 | int ret; | |
0c9361fc JM |
2622 | struct iw_cm_id *id; |
2623 | ||
2624 | id = iw_create_cm_id(id_priv->id.device, | |
2625 | iw_conn_req_handler, | |
2626 | id_priv); | |
2627 | if (IS_ERR(id)) | |
2628 | return PTR_ERR(id); | |
07ebafba | 2629 | |
ca0c448d | 2630 | mutex_lock(&id_priv->qp_mutex); |
68cdba06 | 2631 | id->tos = id_priv->tos; |
926ba19b | 2632 | id->tos_set = id_priv->tos_set; |
ca0c448d | 2633 | mutex_unlock(&id_priv->qp_mutex); |
e35ecb46 | 2634 | id->afonly = id_priv->afonly; |
0c9361fc | 2635 | id_priv->cm_id.iw = id; |
07ebafba | 2636 | |
24d44a39 SW |
2637 | memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), |
2638 | rdma_addr_size(cma_src_addr(id_priv))); | |
07ebafba TT |
2639 | |
2640 | ret = iw_cm_listen(id_priv->cm_id.iw, backlog); | |
2641 | ||
2642 | if (ret) { | |
2643 | iw_destroy_cm_id(id_priv->cm_id.iw); | |
2644 | id_priv->cm_id.iw = NULL; | |
2645 | } | |
2646 | ||
2647 | return ret; | |
2648 | } | |
2649 | ||
e51060f0 SH |
2650 | static int cma_listen_handler(struct rdma_cm_id *id, |
2651 | struct rdma_cm_event *event) | |
2652 | { | |
2653 | struct rdma_id_private *id_priv = id->context; | |
2654 | ||
d54f23c0 JG |
2655 | /* Listening IDs are always destroyed on removal */ |
2656 | if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) | |
2657 | return -1; | |
2658 | ||
e51060f0 SH |
2659 | id->context = id_priv->id.context; |
2660 | id->event_handler = id_priv->id.event_handler; | |
ed999f82 | 2661 | trace_cm_event_handler(id_priv, event); |
e51060f0 SH |
2662 | return id_priv->id.event_handler(id, event); |
2663 | } | |
2664 | ||
c80a0c52 | 2665 | static int cma_listen_on_dev(struct rdma_id_private *id_priv, |
dd37d2f5 JG |
2666 | struct cma_device *cma_dev, |
2667 | struct rdma_id_private **to_destroy) | |
e51060f0 SH |
2668 | { |
2669 | struct rdma_id_private *dev_id_priv; | |
fa20105e | 2670 | struct net *net = id_priv->id.route.addr.dev_addr.net; |
e51060f0 SH |
2671 | int ret; |
2672 | ||
730c8912 MZ |
2673 | lockdep_assert_held(&lock); |
2674 | ||
dd37d2f5 | 2675 | *to_destroy = NULL; |
72219cea | 2676 | if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) |
c80a0c52 | 2677 | return 0; |
94d0c939 | 2678 | |
b09c4d70 LR |
2679 | dev_id_priv = |
2680 | __rdma_create_id(net, cma_listen_handler, id_priv, | |
2681 | id_priv->id.ps, id_priv->id.qp_type, id_priv); | |
2682 | if (IS_ERR(dev_id_priv)) | |
c80a0c52 | 2683 | return PTR_ERR(dev_id_priv); |
e51060f0 | 2684 | |
550e5ca7 | 2685 | dev_id_priv->state = RDMA_CM_ADDR_BOUND; |
f4753834 SH |
2686 | memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), |
2687 | rdma_addr_size(cma_src_addr(id_priv))); | |
e51060f0 | 2688 | |
045959db | 2689 | _cma_attach_to_dev(dev_id_priv, cma_dev); |
cb5cd0ea | 2690 | rdma_restrack_add(&dev_id_priv->res); |
e368d23f | 2691 | cma_id_get(id_priv); |
d02d1f53 | 2692 | dev_id_priv->internal_id = 1; |
5b0ec991 | 2693 | dev_id_priv->afonly = id_priv->afonly; |
ca0c448d | 2694 | mutex_lock(&id_priv->qp_mutex); |
9491128f SW |
2695 | dev_id_priv->tos_set = id_priv->tos_set; |
2696 | dev_id_priv->tos = id_priv->tos; | |
ca0c448d | 2697 | mutex_unlock(&id_priv->qp_mutex); |
e51060f0 | 2698 | |
b09c4d70 | 2699 | ret = rdma_listen(&dev_id_priv->id, id_priv->backlog); |
e51060f0 | 2700 | if (ret) |
c80a0c52 | 2701 | goto err_listen; |
99cfddb8 | 2702 | list_add_tail(&dev_id_priv->listen_item, &id_priv->listen_list); |
c80a0c52 LR |
2703 | return 0; |
2704 | err_listen: | |
dd37d2f5 JG |
2705 | /* Caller must destroy this after releasing lock */ |
2706 | *to_destroy = dev_id_priv; | |
c80a0c52 | 2707 | dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret); |
c80a0c52 | 2708 | return ret; |
e51060f0 SH |
2709 | } |
2710 | ||
c80a0c52 | 2711 | static int cma_listen_on_all(struct rdma_id_private *id_priv) |
e51060f0 | 2712 | { |
dd37d2f5 | 2713 | struct rdma_id_private *to_destroy; |
e51060f0 | 2714 | struct cma_device *cma_dev; |
c80a0c52 | 2715 | int ret; |
e51060f0 SH |
2716 | |
2717 | mutex_lock(&lock); | |
99cfddb8 | 2718 | list_add_tail(&id_priv->listen_any_item, &listen_any_list); |
c80a0c52 | 2719 | list_for_each_entry(cma_dev, &dev_list, list) { |
dd37d2f5 JG |
2720 | ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); |
2721 | if (ret) { | |
2722 | /* Prevent racing with cma_process_remove() */ | |
2723 | if (to_destroy) | |
99cfddb8 | 2724 | list_del_init(&to_destroy->device_item); |
c80a0c52 | 2725 | goto err_listen; |
dd37d2f5 | 2726 | } |
c80a0c52 LR |
2727 | } |
2728 | mutex_unlock(&lock); | |
2729 | return 0; | |
2730 | ||
2731 | err_listen: | |
ca465e1f | 2732 | _cma_cancel_listens(id_priv); |
e51060f0 | 2733 | mutex_unlock(&lock); |
dd37d2f5 JG |
2734 | if (to_destroy) |
2735 | rdma_destroy_id(&to_destroy->id); | |
c80a0c52 | 2736 | return ret; |
e51060f0 SH |
2737 | } |
2738 | ||
a81c994d SH |
2739 | void rdma_set_service_type(struct rdma_cm_id *id, int tos) |
2740 | { | |
2741 | struct rdma_id_private *id_priv; | |
2742 | ||
2743 | id_priv = container_of(id, struct rdma_id_private, id); | |
ca0c448d | 2744 | mutex_lock(&id_priv->qp_mutex); |
a81c994d | 2745 | id_priv->tos = (u8) tos; |
89052d78 | 2746 | id_priv->tos_set = true; |
ca0c448d | 2747 | mutex_unlock(&id_priv->qp_mutex); |
a81c994d SH |
2748 | } |
2749 | EXPORT_SYMBOL(rdma_set_service_type); | |
2750 | ||
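rdma_set_service_type() only records the ToS under qp_mutex; the value is consumed later, for example when cma_query_ib_route() below folds it into the path record's qos_class or traffic class. A one-line usage sketch (the ToS value is illustrative):

/* Illustrative: tag the connection before resolving the route. */
rdma_set_service_type(id, 0x60);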
2c1619ed DG |
2751 | /** |
2752 | * rdma_set_ack_timeout() - Set the ack timeout of QP associated | |
2753 | * with a connection identifier. | |
2754 | * @id: Communication identifier whose QP ack timeout is to be set.
2755 | * @timeout: Ack timeout to set a QP, expressed as 4.096 * 2^(timeout) usec. | |
2756 | * | |
2757 | * This function should be called before rdma_connect() on active side, | |
2758 | * and on passive side before rdma_accept(). It is applicable to primary | |
2759 | * path only. The timeout will affect the local side of the QP; it is not
e1ee1e62 DM |
2760 | * negotiated with the remote side, and zero disables the timer. If it is
2761 | * set before rdma_resolve_route, the value will also be used to determine | |
2762 | * PacketLifeTime for RoCE. | |
2c1619ed DG |
2763 | * |
2764 | * Return: 0 for success | |
2765 | */ | |
2766 | int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout) | |
2767 | { | |
2768 | struct rdma_id_private *id_priv; | |
2769 | ||
748663c8 | 2770 | if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI) |
2c1619ed DG |
2771 | return -EINVAL; |
2772 | ||
2773 | id_priv = container_of(id, struct rdma_id_private, id); | |
ca0c448d | 2774 | mutex_lock(&id_priv->qp_mutex); |
2c1619ed DG |
2775 | id_priv->timeout = timeout; |
2776 | id_priv->timeout_set = true; | |
ca0c448d | 2777 | mutex_unlock(&id_priv->qp_mutex); |
2c1619ed DG |
2778 | |
2779 | return 0; | |
2780 | } | |
2781 | EXPORT_SYMBOL(rdma_set_ack_timeout); | |
2782 | ||
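Given the 4.096 * 2^(timeout) usec encoding above, timeout = 14 corresponds to 4.096 us * 16384, roughly 67 ms. A brief usage sketch (error handling kept minimal; id is assumed to be an RC connection identifier):

/* Sketch: request a ~67 ms local ack timeout before rdma_connect(). */
int err = rdma_set_ack_timeout(id, 14);

if (err)
	pr_warn("rdma_set_ack_timeout failed: %d\n", err);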
3aeffc46 HB |
2783 | /** |
2784 | * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the | |
2785 | * QP associated with a connection identifier. | |
2786 | * @id: Communication identifier whose QP minimum RNR timer is to be set.
2787 | * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK | |
2788 | * Timer Field" in the IBTA specification. | |
2789 | * | |
2790 | * This function should be called before rdma_connect() on active | |
2791 | * side, and on passive side before rdma_accept(). The timer value | |
2792 | * will be associated with the local QP. When it receives a send it is | |
2793 | * not ready to handle, typically because the receive queue is empty, an RNR
2794 | * Retry NAK is returned to the requester with the min_rnr_timer | |
2795 | * encoded. The requester will then wait at least the time specified | |
2796 | * in the NAK before retrying. The default is zero, which translates | |
2797 | * to a minimum RNR Timer value of 655 ms. | |
2798 | * | |
2799 | * Return: 0 for success | |
2800 | */ | |
2801 | int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer) | |
2802 | { | |
2803 | struct rdma_id_private *id_priv; | |
2804 | ||
2805 | /* It is a five-bit value */ | |
2806 | if (min_rnr_timer & 0xe0) | |
2807 | return -EINVAL; | |
2808 | ||
2809 | if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT)) | |
2810 | return -EINVAL; | |
2811 | ||
2812 | id_priv = container_of(id, struct rdma_id_private, id); | |
ca0c448d | 2813 | mutex_lock(&id_priv->qp_mutex); |
3aeffc46 HB |
2814 | id_priv->min_rnr_timer = min_rnr_timer; |
2815 | id_priv->min_rnr_timer_set = true; | |
ca0c448d | 2816 | mutex_unlock(&id_priv->qp_mutex); |
3aeffc46 HB |
2817 | |
2818 | return 0; | |
2819 | } | |
2820 | EXPORT_SYMBOL(rdma_set_min_rnr_timer); | |
2821 | ||
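min_rnr_timer uses the 5-bit IBTA encoding checked by the mask above, so any value with bits outside 0x1f is rejected with -EINVAL. A usage sketch for the passive side, before rdma_accept() (the encoding value 0x10 is illustrative):

/* Sketch: advertise a non-default minimum RNR retry delay. */
int err = rdma_set_min_rnr_timer(id, 0x10);

if (err)
	pr_warn("rdma_set_min_rnr_timer failed: %d\n", err);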
5a374949 MZ |
2822 | static void route_set_path_rec_inbound(struct cma_work *work, |
2823 | struct sa_path_rec *path_rec) | |
2824 | { | |
2825 | struct rdma_route *route = &work->id->id.route; | |
2826 | ||
2827 | if (!route->path_rec_inbound) { | |
2828 | route->path_rec_inbound = | |
2829 | kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL); | |
2830 | if (!route->path_rec_inbound) | |
2831 | return; | |
2832 | } | |
2833 | ||
2834 | *route->path_rec_inbound = *path_rec; | |
2835 | } | |
2836 | ||
2837 | static void route_set_path_rec_outbound(struct cma_work *work, | |
2838 | struct sa_path_rec *path_rec) | |
2839 | { | |
2840 | struct rdma_route *route = &work->id->id.route; | |
2841 | ||
2842 | if (!route->path_rec_outbound) { | |
2843 | route->path_rec_outbound = | |
2844 | kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL); | |
2845 | if (!route->path_rec_outbound) | |
2846 | return; | |
2847 | } | |
2848 | ||
2849 | *route->path_rec_outbound = *path_rec; | |
2850 | } | |
2851 | ||
c2f8fc4e | 2852 | static void cma_query_handler(int status, struct sa_path_rec *path_rec, |
5a374949 | 2853 | int num_prs, void *context) |
e51060f0 SH |
2854 | { |
2855 | struct cma_work *work = context; | |
2856 | struct rdma_route *route; | |
5a374949 | 2857 | int i; |
e51060f0 SH |
2858 | |
2859 | route = &work->id->id.route; | |
2860 | ||
5a374949 MZ |
2861 | if (status) |
2862 | goto fail; | |
2863 | ||
2864 | for (i = 0; i < num_prs; i++) { | |
2865 | if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP)) | |
2866 | *route->path_rec = path_rec[i]; | |
2867 | else if (path_rec[i].flags & IB_PATH_INBOUND) | |
2868 | route_set_path_rec_inbound(work, &path_rec[i]); | |
2869 | else if (path_rec[i].flags & IB_PATH_OUTBOUND) | |
2870 | route_set_path_rec_outbound(work, &path_rec[i]); | |
2871 | } | |
2872 | if (!route->path_rec) { | |
2873 | status = -EINVAL; | |
2874 | goto fail; | |
e51060f0 SH |
2875 | } |
2876 | ||
5a374949 MZ |
2877 | route->num_pri_alt_paths = 1; |
2878 | queue_work(cma_wq, &work->work); | |
2879 | return; | |
2880 | ||
2881 | fail: | |
2882 | work->old_state = RDMA_CM_ROUTE_QUERY; | |
2883 | work->new_state = RDMA_CM_ADDR_RESOLVED; | |
2884 | work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; | |
2885 | work->event.status = status; | |
2886 | pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n", | |
2887 | status); | |
e51060f0 SH |
2888 | queue_work(cma_wq, &work->work); |
2889 | } | |
2890 | ||
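/*
 * Ask the SA for a path record matching this connection's SGID/DGID,
 * pkey and service id, with per-address-family QoS hints added below.
 * The result is delivered asynchronously to cma_query_handler().
 */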
dbace111 LR |
2891 | static int cma_query_ib_route(struct rdma_id_private *id_priv, |
2892 | unsigned long timeout_ms, struct cma_work *work) | |
e51060f0 | 2893 | { |
f4753834 | 2894 | struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; |
c2f8fc4e | 2895 | struct sa_path_rec path_rec; |
a81c994d SH |
2896 | ib_sa_comp_mask comp_mask; |
2897 | struct sockaddr_in6 *sin6; | |
f68194ca | 2898 | struct sockaddr_ib *sib; |
e51060f0 SH |
2899 | |
2900 | memset(&path_rec, 0, sizeof path_rec); | |
4c33bd19 DC |
2901 | |
2902 | if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num)) | |
2903 | path_rec.rec_type = SA_PATH_REC_TYPE_OPA; | |
2904 | else | |
2905 | path_rec.rec_type = SA_PATH_REC_TYPE_IB; | |
f4753834 SH |
2906 | rdma_addr_get_sgid(dev_addr, &path_rec.sgid); |
2907 | rdma_addr_get_dgid(dev_addr, &path_rec.dgid); | |
2908 | path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); | |
e51060f0 | 2909 | path_rec.numb_path = 1; |
962063e6 | 2910 | path_rec.reversible = 1; |
d3957b86 MD |
2911 | path_rec.service_id = rdma_get_service_id(&id_priv->id, |
2912 | cma_dst_addr(id_priv)); | |
a81c994d SH |
2913 | |
2914 | comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | | |
2915 | IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | | |
2916 | IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID; | |
2917 | ||
f68194ca SH |
2918 | switch (cma_family(id_priv)) { |
2919 | case AF_INET: | |
a81c994d SH |
2920 | path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); |
2921 | comp_mask |= IB_SA_PATH_REC_QOS_CLASS; | |
f68194ca SH |
2922 | break; |
2923 | case AF_INET6: | |
f4753834 | 2924 | sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); |
a81c994d SH |
2925 | path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20); |
2926 | comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; | |
f68194ca SH |
2927 | break; |
2928 | case AF_IB: | |
2929 | sib = (struct sockaddr_ib *) cma_src_addr(id_priv); | |
2930 | path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20); | |
2931 | comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; | |
2932 | break; | |
a81c994d | 2933 | } |
e51060f0 | 2934 | |
c1a0b23b | 2935 | id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, |
a81c994d SH |
2936 | id_priv->id.port_num, &path_rec, |
2937 | comp_mask, timeout_ms, | |
2938 | GFP_KERNEL, cma_query_handler, | |
2939 | work, &id_priv->query); | |
e51060f0 SH |
2940 | |
2941 | return (id_priv->query_id < 0) ? id_priv->query_id : 0; | |
2942 | } | |
2943 | ||
fe454dc3 AH |
2944 | static void cma_iboe_join_work_handler(struct work_struct *work) |
2945 | { | |
2946 | struct cma_multicast *mc = | |
2947 | container_of(work, struct cma_multicast, iboe_join.work); | |
2948 | struct rdma_cm_event *event = &mc->iboe_join.event; | |
2949 | struct rdma_id_private *id_priv = mc->id_priv; | |
2950 | int ret; | |
2951 | ||
2952 | mutex_lock(&id_priv->handler_mutex); | |
2953 | if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || | |
2954 | READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) | |
2955 | goto out_unlock; | |
2956 | ||
2957 | ret = cma_cm_event_handler(id_priv, event); | |
2958 | WARN_ON(ret); | |
2959 | ||
2960 | out_unlock: | |
2961 | mutex_unlock(&id_priv->handler_mutex); | |
2962 | if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN) | |
2963 | rdma_destroy_ah_attr(&event->param.ud.ah_attr); | |
2964 | } | |
2965 | ||
c4028958 | 2966 | static void cma_work_handler(struct work_struct *_work) |
e51060f0 | 2967 | { |
c4028958 | 2968 | struct cma_work *work = container_of(_work, struct cma_work, work); |
e51060f0 | 2969 | struct rdma_id_private *id_priv = work->id; |
e51060f0 | 2970 | |
de910bd9 | 2971 | mutex_lock(&id_priv->handler_mutex); |
7e85bcda JG |
2972 | if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || |
2973 | READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) | |
f6a9d47a | 2974 | goto out_unlock; |
7e85bcda JG |
2975 | if (work->old_state != 0 || work->new_state != 0) { |
2976 | if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) | |
2977 | goto out_unlock; | |
e51060f0 | 2978 | } |
f6a9d47a | 2979 | |
ed999f82 | 2980 | if (cma_cm_event_handler(id_priv, &work->event)) { |
f6a9d47a JG |
2981 | cma_id_put(id_priv); |
2982 | destroy_id_handler_unlock(id_priv); | |
2983 | goto out_free; | |
dd5bdff8 OG |
2984 | } |
2985 | ||
f6a9d47a | 2986 | out_unlock: |
dd5bdff8 | 2987 | mutex_unlock(&id_priv->handler_mutex); |
e368d23f | 2988 | cma_id_put(id_priv); |
f6a9d47a | 2989 | out_free: |
b5de0c60 JG |
2990 | if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN) |
2991 | rdma_destroy_ah_attr(&work->event.param.ud.ah_attr); | |
dd5bdff8 OG |
2992 | kfree(work); |
2993 | } | |
2994 | ||
981b5a23 PP |
2995 | static void cma_init_resolve_route_work(struct cma_work *work, |
2996 | struct rdma_id_private *id_priv) | |
2997 | { | |
2998 | work->id = id_priv; | |
2999 | INIT_WORK(&work->work, cma_work_handler); | |
3000 | work->old_state = RDMA_CM_ROUTE_QUERY; | |
3001 | work->new_state = RDMA_CM_ROUTE_RESOLVED; | |
3002 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; | |
3003 | } | |
3004 | ||
081ea519 PP |
3005 | static void enqueue_resolve_addr_work(struct cma_work *work, |
3006 | struct rdma_id_private *id_priv) | |
981b5a23 | 3007 | { |
e368d23f PP |
3008 | /* Balances with cma_id_put() in cma_work_handler */ |
3009 | cma_id_get(id_priv); | |
081ea519 | 3010 | |
981b5a23 PP |
3011 | work->id = id_priv; |
3012 | INIT_WORK(&work->work, cma_work_handler); | |
3013 | work->old_state = RDMA_CM_ADDR_QUERY; | |
3014 | work->new_state = RDMA_CM_ADDR_RESOLVED; | |
3015 | work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; | |
081ea519 PP |
3016 | |
3017 | queue_work(cma_wq, &work->work); | |
981b5a23 PP |
3018 | } |
3019 | ||
dbace111 LR |
3020 | static int cma_resolve_ib_route(struct rdma_id_private *id_priv, |
3021 | unsigned long timeout_ms) | |
e51060f0 SH |
3022 | { |
3023 | struct rdma_route *route = &id_priv->id.route; | |
3024 | struct cma_work *work; | |
3025 | int ret; | |
3026 | ||
3027 | work = kzalloc(sizeof *work, GFP_KERNEL); | |
3028 | if (!work) | |
3029 | return -ENOMEM; | |
3030 | ||
981b5a23 | 3031 | cma_init_resolve_route_work(work, id_priv); |
e51060f0 | 3032 | |
74f160ea GR |
3033 | if (!route->path_rec) |
3034 | route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); | |
e51060f0 SH |
3035 | if (!route->path_rec) { |
3036 | ret = -ENOMEM; | |
3037 | goto err1; | |
3038 | } | |
3039 | ||
3040 | ret = cma_query_ib_route(id_priv, timeout_ms, work); | |
3041 | if (ret) | |
3042 | goto err2; | |
3043 | ||
3044 | return 0; | |
3045 | err2: | |
3046 | kfree(route->path_rec); | |
3047 | route->path_rec = NULL; | |
3048 | err1: | |
3049 | kfree(work); | |
3050 | return ret; | |
3051 | } | |
3052 | ||
9327c7af PP |
3053 | static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type, |
3054 | unsigned long supported_gids, | |
3055 | enum ib_gid_type default_gid) | |
3056 | { | |
3057 | if ((network_type == RDMA_NETWORK_IPV4 || | |
3058 | network_type == RDMA_NETWORK_IPV6) && | |
3059 | test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) | |
3060 | return IB_GID_TYPE_ROCE_UDP_ENCAP; | |
3061 | ||
3062 | return default_gid; | |
3063 | } | |
3064 | ||
3065 | /* | |
3066 | * cma_iboe_set_path_rec_l2_fields() is a helper function which sets | |
3067 | * the path record type based on the GID type. | |
3068 | * It also sets up the other L2 fields of the path record, including | |
3069 | * the destination MAC address and the netdev ifindex. | |
3070 | * It returns the netdev of the bound interface for this path record entry. | |
3071 | */ | |
3072 | static struct net_device * | |
3073 | cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv) | |
3074 | { | |
3075 | struct rdma_route *route = &id_priv->id.route; | |
3076 | enum ib_gid_type gid_type = IB_GID_TYPE_ROCE; | |
3077 | struct rdma_addr *addr = &route->addr; | |
3078 | unsigned long supported_gids; | |
3079 | struct net_device *ndev; | |
3080 | ||
3081 | if (!addr->dev_addr.bound_dev_if) | |
3082 | return NULL; | |
3083 | ||
3084 | ndev = dev_get_by_index(addr->dev_addr.net, | |
3085 | addr->dev_addr.bound_dev_if); | |
3086 | if (!ndev) | |
3087 | return NULL; | |
3088 | ||
3089 | supported_gids = roce_gid_type_mask_support(id_priv->id.device, | |
3090 | id_priv->id.port_num); | |
3091 | gid_type = cma_route_gid_type(addr->dev_addr.network, | |
3092 | supported_gids, | |
3093 | id_priv->gid_type); | |
3094 | /* Use the hint from the IP stack to select the GID type */ | |
3095 | if (gid_type < ib_network_to_gid_type(addr->dev_addr.network)) | |
3096 | gid_type = ib_network_to_gid_type(addr->dev_addr.network); | |
3097 | route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type); | |
3098 | ||
114cc9c4 | 3099 | route->path_rec->roce.route_resolved = true; |
9327c7af PP |
3100 | sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr); |
3101 | return ndev; | |
3102 | } | |
3103 | ||
fe75889f PP |
3104 | int rdma_set_ib_path(struct rdma_cm_id *id, |
3105 | struct sa_path_rec *path_rec) | |
e51060f0 SH |
3106 | { |
3107 | struct rdma_id_private *id_priv; | |
8d20a1f0 | 3108 | struct net_device *ndev; |
e51060f0 SH |
3109 | int ret; |
3110 | ||
3111 | id_priv = container_of(id, struct rdma_id_private, id); | |
550e5ca7 NM |
3112 | if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, |
3113 | RDMA_CM_ROUTE_RESOLVED)) | |
e51060f0 SH |
3114 | return -EINVAL; |
3115 | ||
fe75889f | 3116 | id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec), |
9893e742 | 3117 | GFP_KERNEL); |
e51060f0 SH |
3118 | if (!id->route.path_rec) { |
3119 | ret = -ENOMEM; | |
3120 | goto err; | |
3121 | } | |
3122 | ||
8d20a1f0 PP |
3123 | if (rdma_protocol_roce(id->device, id->port_num)) { |
3124 | ndev = cma_iboe_set_path_rec_l2_fields(id_priv); | |
3125 | if (!ndev) { | |
3126 | ret = -ENODEV; | |
3127 | goto err_free; | |
3128 | } | |
3129 | dev_put(ndev); | |
3130 | } | |
3131 | ||
bf9a9928 | 3132 | id->route.num_pri_alt_paths = 1; |
e51060f0 | 3133 | return 0; |
8d20a1f0 PP |
3134 | |
3135 | err_free: | |
3136 | kfree(id->route.path_rec); | |
3137 | id->route.path_rec = NULL; | |
e51060f0 | 3138 | err: |
550e5ca7 | 3139 | cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED); |
e51060f0 SH |
3140 | return ret; |
3141 | } | |
fe75889f | 3142 | EXPORT_SYMBOL(rdma_set_ib_path); |
e51060f0 | 3143 | |
d6f91252 | 3144 | static int cma_resolve_iw_route(struct rdma_id_private *id_priv) |
07ebafba TT |
3145 | { |
3146 | struct cma_work *work; | |
3147 | ||
3148 | work = kzalloc(sizeof *work, GFP_KERNEL); | |
3149 | if (!work) | |
3150 | return -ENOMEM; | |
3151 | ||
981b5a23 | 3152 | cma_init_resolve_route_work(work, id_priv); |
07ebafba TT |
3153 | queue_work(cma_wq, &work->work); |
3154 | return 0; | |
3155 | } | |
3156 | ||
d3bd9396 | 3157 | static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio) |
eb072c4b | 3158 | { |
eb072c4b EP |
3159 | struct net_device *dev; |
3160 | ||
d3bd9396 | 3161 | dev = vlan_dev_real_dev(vlan_ndev); |
eb072c4b EP |
3162 | if (dev->num_tc) |
3163 | return netdev_get_prio_tc_map(dev, prio); | |
3164 | ||
d3bd9396 PP |
3165 | return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) & |
3166 | VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; | |
3167 | } | |
3168 | ||
3169 | struct iboe_prio_tc_map { | |
3170 | int input_prio; | |
3171 | int output_tc; | |
3172 | bool found; | |
3173 | }; | |
3174 | ||
eff74233 TY |
3175 | static int get_lower_vlan_dev_tc(struct net_device *dev, |
3176 | struct netdev_nested_priv *priv) | |
d3bd9396 | 3177 | { |
eff74233 | 3178 | struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data; |
d3bd9396 PP |
3179 | |
3180 | if (is_vlan_dev(dev)) | |
3181 | map->output_tc = get_vlan_ndev_tc(dev, map->input_prio); | |
3182 | else if (dev->num_tc) | |
3183 | map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio); | |
3184 | else | |
3185 | map->output_tc = 0; | |
3186 | /* We are interested only in the first-level VLAN device, so always | |
3187 | * return 1 to stop iterating over next-level devices. | |
3188 | */ | |
3189 | map->found = true; | |
3190 | return 1; | |
3191 | } | |
3192 | ||
3193 | static int iboe_tos_to_sl(struct net_device *ndev, int tos) | |
3194 | { | |
3195 | struct iboe_prio_tc_map prio_tc_map = {}; | |
3196 | int prio = rt_tos2priority(tos); | |
eff74233 | 3197 | struct netdev_nested_priv priv; |
d3bd9396 PP |
3198 | |
3199 | /* If VLAN device, get it directly from the VLAN netdev */ | |
d0d7b10b | 3200 | if (is_vlan_dev(ndev)) |
d3bd9396 PP |
3201 | return get_vlan_ndev_tc(ndev, prio); |
3202 | ||
3203 | prio_tc_map.input_prio = prio; | |
eff74233 | 3204 | priv.data = (void *)&prio_tc_map; |
d3bd9396 PP |
3205 | rcu_read_lock(); |
3206 | netdev_walk_all_lower_dev_rcu(ndev, | |
3207 | get_lower_vlan_dev_tc, | |
eff74233 | 3208 | &priv); |
d3bd9396 PP |
3209 | rcu_read_unlock(); |
3210 | /* If a map is found from a lower device, use it; otherwise | |
3211 | * continue with the current netdevice to get the priority-to-TC map. | |
3212 | */ | |
3213 | if (prio_tc_map.found) | |
3214 | return prio_tc_map.output_tc; | |
3215 | else if (ndev->num_tc) | |
3216 | return netdev_get_prio_tc_map(ndev, prio); | |
3217 | else | |
3218 | return 0; | |
eb072c4b EP |
3219 | } |
3220 | ||
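/*
 * Derive the UDP flow label for RoCE v2: reuse a caller-supplied IPv6
 * flow label when there is one, otherwise fold the source and
 * destination ports into a hash so the label is stable per connection.
 */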
f6653405 MZ |
3221 | static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv) |
3222 | { | |
3223 | struct sockaddr_in6 *addr6; | |
3224 | u16 dport, sport; | |
3225 | u32 hash, fl; | |
3226 | ||
3227 | addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv); | |
3228 | fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK; | |
3229 | if ((cma_family(id_priv) != AF_INET6) || !fl) { | |
3230 | dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv))); | |
3231 | sport = be16_to_cpu(cma_port(cma_src_addr(id_priv))); | |
3232 | hash = (u32)sport * 31 + dport; | |
3233 | fl = hash & IB_GRH_FLOWLABEL_MASK; | |
3234 | } | |
3235 | ||
3236 | return cpu_to_be32(fl); | |
3237 | } | |
3238 | ||
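/*
 * RoCE has no SA to query, so the path record is synthesized locally:
 * the GIDs are derived from the IP addresses, while MTU, rate and hop
 * limit come from the bound netdev, then the completion work is queued.
 */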
3c86aa70 EC |
3239 | static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) |
3240 | { | |
3241 | struct rdma_route *route = &id_priv->id.route; | |
3242 | struct rdma_addr *addr = &route->addr; | |
3243 | struct cma_work *work; | |
3244 | int ret; | |
4367ec7f | 3245 | struct net_device *ndev; |
4367ec7f | 3246 | |
89052d78 MD |
3247 | u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num - |
3248 | rdma_start_port(id_priv->cma_dev->device)]; | |
ca0c448d | 3249 | u8 tos; |
dd5f03be | 3250 | |
ca0c448d HB |
3251 | mutex_lock(&id_priv->qp_mutex); |
3252 | tos = id_priv->tos_set ? id_priv->tos : default_roce_tos; | |
3253 | mutex_unlock(&id_priv->qp_mutex); | |
3c86aa70 | 3254 | |
3c86aa70 EC |
3255 | work = kzalloc(sizeof *work, GFP_KERNEL); |
3256 | if (!work) | |
3257 | return -ENOMEM; | |
3258 | ||
3c86aa70 EC |
3259 | route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL); |
3260 | if (!route->path_rec) { | |
3261 | ret = -ENOMEM; | |
3262 | goto err1; | |
3263 | } | |
3264 | ||
bf9a9928 | 3265 | route->num_pri_alt_paths = 1; |
3c86aa70 | 3266 | |
9327c7af | 3267 | ndev = cma_iboe_set_path_rec_l2_fields(id_priv); |
3c86aa70 EC |
3268 | if (!ndev) { |
3269 | ret = -ENODEV; | |
3270 | goto err2; | |
3271 | } | |
3272 | ||
7b85627b MS |
3273 | rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, |
3274 | &route->path_rec->sgid); | |
3275 | rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, | |
3276 | &route->path_rec->dgid); | |
af7bd463 | 3277 | |
c3efe750 | 3278 | if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB) |
c865f246 | 3279 | /* TODO: get the hoplimit from the inet/inet6 device */ |
c3efe750 MB |
3280 | route->path_rec->hop_limit = addr->dev_addr.hoplimit; |
3281 | else | |
c865f246 | 3282 | route->path_rec->hop_limit = 1; |
af7bd463 EC |
3283 | route->path_rec->reversible = 1; |
3284 | route->path_rec->pkey = cpu_to_be16(0xffff); | |
3285 | route->path_rec->mtu_selector = IB_SA_EQ; | |
89052d78 MD |
3286 | route->path_rec->sl = iboe_tos_to_sl(ndev, tos); |
3287 | route->path_rec->traffic_class = tos; | |
3c86aa70 EC |
3288 | route->path_rec->mtu = iboe_get_mtu(ndev->mtu); |
3289 | route->path_rec->rate_selector = IB_SA_EQ; | |
3290 | route->path_rec->rate = iboe_get_rate(ndev); | |
3291 | dev_put(ndev); | |
3292 | route->path_rec->packet_life_time_selector = IB_SA_EQ; | |
e1ee1e62 DM |
3293 | /* In case the ACK timeout is set, use it to calculate PacketLifeTime. | |
3294 | * As per IBTA 12.7.34, | |
3295 | * local ACK timeout = (2 * PacketLifeTime + Local CA’s ACK delay). | |
3296 | * Assuming a negligible local ACK delay, PacketLifeTime = local ACK | |
3297 | * timeout/2 is a reasonable approximation for RoCE. Both values are | |
3298 | * 4.096 usec * 2^x encodings, so halving is exponent - 1 ("timeout - 1"). | |
3299 | */ | |
ca0c448d | 3300 | mutex_lock(&id_priv->qp_mutex); |
e84045ea HB |
3301 | if (id_priv->timeout_set && id_priv->timeout) |
3302 | route->path_rec->packet_life_time = id_priv->timeout - 1; | |
3303 | else | |
3304 | route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME; | |
ca0c448d | 3305 | mutex_unlock(&id_priv->qp_mutex); |
e1ee1e62 | 3306 | |
3c86aa70 EC |
3307 | if (!route->path_rec->mtu) { |
3308 | ret = -EINVAL; | |
3309 | goto err2; | |
3310 | } | |
3311 | ||
f6653405 MZ |
3312 | if (rdma_protocol_roce_udp_encap(id_priv->id.device, |
3313 | id_priv->id.port_num)) | |
3314 | route->path_rec->flow_label = | |
3315 | cma_get_roce_udp_flow_label(id_priv); | |
3316 | ||
981b5a23 | 3317 | cma_init_resolve_route_work(work, id_priv); |
3c86aa70 EC |
3318 | queue_work(cma_wq, &work->work); |
3319 | ||
3320 | return 0; | |
3321 | ||
3322 | err2: | |
3323 | kfree(route->path_rec); | |
3324 | route->path_rec = NULL; | |
bf9a9928 | 3325 | route->num_pri_alt_paths = 0; |
3c86aa70 EC |
3326 | err1: |
3327 | kfree(work); | |
3328 | return ret; | |
3329 | } | |
3330 | ||
dbace111 | 3331 | int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms) |
e51060f0 SH |
3332 | { |
3333 | struct rdma_id_private *id_priv; | |
3334 | int ret; | |
3335 | ||
5f5a6509 HB |
3336 | if (!timeout_ms) |
3337 | return -EINVAL; | |
3338 | ||
e51060f0 | 3339 | id_priv = container_of(id, struct rdma_id_private, id); |
550e5ca7 | 3340 | if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) |
e51060f0 SH |
3341 | return -EINVAL; |
3342 | ||
e368d23f | 3343 | cma_id_get(id_priv); |
fe53ba2f | 3344 | if (rdma_cap_ib_sa(id->device, id->port_num)) |
c72f2189 | 3345 | ret = cma_resolve_ib_route(id_priv, timeout_ms); |
fc008bdb | 3346 | else if (rdma_protocol_roce(id->device, id->port_num)) { |
c72f2189 | 3347 | ret = cma_resolve_iboe_route(id_priv); |
fc008bdb PH |
3348 | if (!ret) |
3349 | cma_add_id_to_tree(id_priv); | |
3350 | } | |
c72f2189 | 3351 | else if (rdma_protocol_iwarp(id->device, id->port_num)) |
d6f91252 | 3352 | ret = cma_resolve_iw_route(id_priv); |
c72f2189 | 3353 | else |
e51060f0 | 3354 | ret = -ENOSYS; |
c72f2189 | 3355 | |
e51060f0 SH |
3356 | if (ret) |
3357 | goto err; | |
3358 | ||
3359 | return 0; | |
3360 | err: | |
550e5ca7 | 3361 | cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); |
e368d23f | 3362 | cma_id_put(id_priv); |
e51060f0 SH |
3363 | return ret; |
3364 | } | |
3365 | EXPORT_SYMBOL(rdma_resolve_route); | |
3366 | ||
6a3e362d SH |
3367 | static void cma_set_loopback(struct sockaddr *addr) |
3368 | { | |
3369 | switch (addr->sa_family) { | |
3370 | case AF_INET: | |
3371 | ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK); | |
3372 | break; | |
3373 | case AF_INET6: | |
3374 | ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr, | |
3375 | 0, 0, 0, htonl(1)); | |
3376 | break; | |
3377 | default: | |
3378 | ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr, | |
3379 | 0, 0, 0, htonl(1)); | |
3380 | break; | |
3381 | } | |
3382 | } | |
3383 | ||
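/*
 * Bind to a local device/port so that a wildcard or loopback
 * destination can be resolved: prefer the first device with an ACTIVE
 * port (for AF_IB only IB-CM capable devices are considered), falling
 * back to port 1 of the first device found.
 */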
e51060f0 SH |
3384 | static int cma_bind_loopback(struct rdma_id_private *id_priv) |
3385 | { | |
b0569e40 | 3386 | struct cma_device *cma_dev, *cur_dev; |
f0ee3404 | 3387 | union ib_gid gid; |
102c5ce0 | 3388 | enum ib_port_state port_state; |
cc055dd3 | 3389 | unsigned int p; |
e51060f0 SH |
3390 | u16 pkey; |
3391 | int ret; | |
e51060f0 | 3392 | |
b0569e40 | 3393 | cma_dev = NULL; |
e51060f0 | 3394 | mutex_lock(&lock); |
b0569e40 SH |
3395 | list_for_each_entry(cur_dev, &dev_list, list) { |
3396 | if (cma_family(id_priv) == AF_IB && | |
72219cea | 3397 | !rdma_cap_ib_cm(cur_dev->device, 1)) |
b0569e40 SH |
3398 | continue; |
3399 | ||
3400 | if (!cma_dev) | |
3401 | cma_dev = cur_dev; | |
3402 | ||
cc055dd3 | 3403 | rdma_for_each_port (cur_dev->device, p) { |
102c5ce0 JW |
3404 | if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) && |
3405 | port_state == IB_PORT_ACTIVE) { | |
b0569e40 SH |
3406 | cma_dev = cur_dev; |
3407 | goto port_found; | |
3408 | } | |
3409 | } | |
3410 | } | |
3411 | ||
3412 | if (!cma_dev) { | |
e82153b5 KK |
3413 | ret = -ENODEV; |
3414 | goto out; | |
3415 | } | |
e51060f0 | 3416 | |
e82153b5 | 3417 | p = 1; |
e51060f0 SH |
3418 | |
3419 | port_found: | |
1dfce294 | 3420 | ret = rdma_query_gid(cma_dev->device, p, 0, &gid); |
e51060f0 SH |
3421 | if (ret) |
3422 | goto out; | |
3423 | ||
3424 | ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey); | |
3425 | if (ret) | |
3426 | goto out; | |
3427 | ||
6f8372b6 | 3428 | id_priv->id.route.addr.dev_addr.dev_type = |
21655afc | 3429 | (rdma_protocol_ib(cma_dev->device, p)) ? |
6f8372b6 SH |
3430 | ARPHRD_INFINIBAND : ARPHRD_ETHER; |
3431 | ||
3432 | rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); | |
e51060f0 SH |
3433 | ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); |
3434 | id_priv->id.port_num = p; | |
3435 | cma_attach_to_dev(id_priv, cma_dev); | |
cb5cd0ea | 3436 | rdma_restrack_add(&id_priv->res); |
f4753834 | 3437 | cma_set_loopback(cma_src_addr(id_priv)); |
e51060f0 SH |
3438 | out: |
3439 | mutex_unlock(&lock); | |
3440 | return ret; | |
3441 | } | |
3442 | ||
3443 | static void addr_handler(int status, struct sockaddr *src_addr, | |
3444 | struct rdma_dev_addr *dev_addr, void *context) | |
3445 | { | |
3446 | struct rdma_id_private *id_priv = context; | |
7582df82 | 3447 | struct rdma_cm_event event = {}; |
5fc01fb8 MJ |
3448 | struct sockaddr *addr; |
3449 | struct sockaddr_storage old_addr; | |
e51060f0 | 3450 | |
de910bd9 | 3451 | mutex_lock(&id_priv->handler_mutex); |
550e5ca7 NM |
3452 | if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, |
3453 | RDMA_CM_ADDR_RESOLVED)) | |
61a73c70 | 3454 | goto out; |
61a73c70 | 3455 | |
5fc01fb8 MJ |
3456 | /* |
3457 | * Store the previous src address, so that if we fail to acquire a | |
3458 | * matching rdma device, the old address can be restored, which helps | |
3459 | * to cancel the cma listen operation correctly. | |
3460 | */ | |
3461 | addr = cma_src_addr(id_priv); | |
3462 | memcpy(&old_addr, addr, rdma_addr_size(addr)); | |
3463 | memcpy(addr, src_addr, rdma_addr_size(src_addr)); | |
498683c6 | 3464 | if (!status && !id_priv->cma_dev) { |
ff11c6cd | 3465 | status = cma_acquire_dev_by_src_ip(id_priv); |
498683c6 MS |
3466 | if (status) |
3467 | pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n", | |
3468 | status); | |
cb5cd0ea | 3469 | rdma_restrack_add(&id_priv->res); |
a6e4d254 | 3470 | } else if (status) { |
498683c6 MS |
3471 | pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status); |
3472 | } | |
e51060f0 SH |
3473 | |
3474 | if (status) { | |
5fc01fb8 MJ |
3475 | memcpy(addr, &old_addr, |
3476 | rdma_addr_size((struct sockaddr *)&old_addr)); | |
550e5ca7 NM |
3477 | if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, |
3478 | RDMA_CM_ADDR_BOUND)) | |
e51060f0 | 3479 | goto out; |
a1b1b61f SH |
3480 | event.event = RDMA_CM_EVENT_ADDR_ERROR; |
3481 | event.status = status; | |
7b85627b | 3482 | } else |
a1b1b61f | 3483 | event.event = RDMA_CM_EVENT_ADDR_RESOLVED; |
e51060f0 | 3484 | |
ed999f82 | 3485 | if (cma_cm_event_handler(id_priv, &event)) { |
f6a9d47a | 3486 | destroy_id_handler_unlock(id_priv); |
e51060f0 SH |
3487 | return; |
3488 | } | |
3489 | out: | |
de910bd9 | 3490 | mutex_unlock(&id_priv->handler_mutex); |
e51060f0 SH |
3491 | } |
3492 | ||
3493 | static int cma_resolve_loopback(struct rdma_id_private *id_priv) | |
3494 | { | |
3495 | struct cma_work *work; | |
f0ee3404 | 3496 | union ib_gid gid; |
e51060f0 SH |
3497 | int ret; |
3498 | ||
3499 | work = kzalloc(sizeof *work, GFP_KERNEL); | |
3500 | if (!work) | |
3501 | return -ENOMEM; | |
3502 | ||
3503 | if (!id_priv->cma_dev) { | |
3504 | ret = cma_bind_loopback(id_priv); | |
3505 | if (ret) | |
3506 | goto err; | |
3507 | } | |
3508 | ||
6f8372b6 SH |
3509 | rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); |
3510 | rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); | |
e51060f0 | 3511 | |
081ea519 | 3512 | enqueue_resolve_addr_work(work, id_priv); |
e51060f0 SH |
3513 | return 0; |
3514 | err: | |
3515 | kfree(work); | |
3516 | return ret; | |
3517 | } | |
3518 | ||
f17df3b0 SH |
3519 | static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) |
3520 | { | |
3521 | struct cma_work *work; | |
3522 | int ret; | |
3523 | ||
3524 | work = kzalloc(sizeof *work, GFP_KERNEL); | |
3525 | if (!work) | |
3526 | return -ENOMEM; | |
3527 | ||
3528 | if (!id_priv->cma_dev) { | |
3529 | ret = cma_resolve_ib_dev(id_priv); | |
3530 | if (ret) | |
3531 | goto err; | |
3532 | } | |
3533 | ||
3534 | rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) | |
3535 | &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); | |
3536 | ||
081ea519 | 3537 | enqueue_resolve_addr_work(work, id_priv); |
f17df3b0 SH |
3538 | return 0; |
3539 | err: | |
3540 | kfree(work); | |
3541 | return ret; | |
3542 | } | |
3543 | ||
e51060f0 | 3544 | static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, |
2df7dba8 | 3545 | const struct sockaddr *dst_addr) |
e51060f0 | 3546 | { |
22e9f710 JG |
3547 | struct sockaddr_storage zero_sock = {}; |
3548 | ||
3549 | if (src_addr && src_addr->sa_family) | |
3550 | return rdma_bind_addr(id, src_addr); | |
3551 | ||
3552 | /* | |
3553 | * When the src_addr is not specified, automatically supply a wildcard (ANY) address. | |
3554 | */ | |
3555 | zero_sock.ss_family = dst_addr->sa_family; | |
3556 | if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) { | |
3557 | struct sockaddr_in6 *src_addr6 = | |
3558 | (struct sockaddr_in6 *)&zero_sock; | |
3559 | struct sockaddr_in6 *dst_addr6 = | |
3560 | (struct sockaddr_in6 *)dst_addr; | |
3561 | ||
3562 | src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; | |
3563 | if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) | |
3564 | id->route.addr.dev_addr.bound_dev_if = | |
3565 | dst_addr6->sin6_scope_id; | |
3566 | } else if (dst_addr->sa_family == AF_IB) { | |
3567 | ((struct sockaddr_ib *)&zero_sock)->sib_pkey = | |
3568 | ((struct sockaddr_ib *)dst_addr)->sib_pkey; | |
3569 | } | |
3570 | return rdma_bind_addr(id, (struct sockaddr *)&zero_sock); | |
e51060f0 SH |
3571 | } |
3572 | ||
732d41c5 JG |
3573 | /* |
3574 | * If required, resolve the source address for bind and leave the id_priv in | |
3575 | * state RDMA_CM_ADDR_BOUND. This oddly uses the state to infer the prior | |
3576 | * calls made by the ULP; a previously bound ID will not be re-bound, and | |
3577 | * src_addr is ignored. | |
3578 | */ | |
3579 | static int resolve_prepare_src(struct rdma_id_private *id_priv, | |
3580 | struct sockaddr *src_addr, | |
3581 | const struct sockaddr *dst_addr) | |
e51060f0 | 3582 | { |
e51060f0 SH |
3583 | int ret; |
3584 | ||
e4103312 | 3585 | memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); |
732d41c5 JG |
3586 | if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) { |
3587 | /* For a well behaved ULP state will be RDMA_CM_IDLE */ | |
3588 | ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr); | |
3589 | if (ret) | |
3590 | goto err_dst; | |
3591 | if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, | |
3592 | RDMA_CM_ADDR_QUERY))) { | |
3593 | ret = -EINVAL; | |
3594 | goto err_dst; | |
e4103312 | 3595 | } |
e51060f0 SH |
3596 | } |
3597 | ||
e4103312 | 3598 | if (cma_family(id_priv) != dst_addr->sa_family) { |
732d41c5 JG |
3599 | ret = -EINVAL; |
3600 | goto err_state; | |
e4103312 | 3601 | } |
732d41c5 | 3602 | return 0; |
4ae7152e | 3603 | |
732d41c5 JG |
3604 | err_state: |
3605 | cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); | |
3606 | err_dst: | |
3607 | memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr)); | |
3608 | return ret; | |
3609 | } | |
3610 | ||
3611 | int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, | |
3612 | const struct sockaddr *dst_addr, unsigned long timeout_ms) | |
3613 | { | |
3614 | struct rdma_id_private *id_priv = | |
3615 | container_of(id, struct rdma_id_private, id); | |
3616 | int ret; | |
3617 | ||
3618 | ret = resolve_prepare_src(id_priv, src_addr, dst_addr); | |
3619 | if (ret) | |
3620 | return ret; | |
e51060f0 | 3621 | |
f17df3b0 | 3622 | if (cma_any_addr(dst_addr)) { |
e51060f0 | 3623 | ret = cma_resolve_loopback(id_priv); |
f17df3b0 SH |
3624 | } else { |
3625 | if (dst_addr->sa_family == AF_IB) { | |
3626 | ret = cma_resolve_ib_addr(id_priv); | |
3627 | } else { | |
305d568b JG |
3628 | /* |
3629 | * The FSM can return back to RDMA_CM_ADDR_BOUND after | |
3630 | * rdma_resolve_ip() is called, e.g. through the error | |
3631 | * path in addr_handler(). If this happens the existing | |
3632 | * request must be canceled before issuing a new one. | |
3633 | * Since canceling a request is a bit slow and this | |
3634 | * oddball path is rare, keep track of whether a request | |
3635 | * has been issued. The flag ends up being permanent | |
3636 | * state, since this is the only cancel point and it | |
3637 | * sits immediately before rdma_resolve_ip(). | |
3638 | */ | |
3639 | if (id_priv->used_resolve_ip) | |
3640 | rdma_addr_cancel(&id->route.addr.dev_addr); | |
3641 | else | |
3642 | id_priv->used_resolve_ip = 1; | |
0e9d2c19 PP |
3643 | ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr, |
3644 | &id->route.addr.dev_addr, | |
3645 | timeout_ms, addr_handler, | |
3646 | false, id_priv); | |
f17df3b0 SH |
3647 | } |
3648 | } | |
e51060f0 SH |
3649 | if (ret) |
3650 | goto err; | |
3651 | ||
3652 | return 0; | |
3653 | err: | |
550e5ca7 | 3654 | cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); |
e51060f0 SH |
3655 | return ret; |
3656 | } | |
3657 | EXPORT_SYMBOL(rdma_resolve_addr); | |
3658 | ||
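For illustration only (a sketch, not part of this file): address and route resolution complete asynchronously, so a kernel ULP normally chains the steps from its event handler. my_connect() and my_fail() are hypothetical ULP helpers, and a non-zero return tells the CM core to destroy the id.

	static int my_cma_handler(struct rdma_cm_id *id,
				  struct rdma_cm_event *ev)
	{
		switch (ev->event) {
		case RDMA_CM_EVENT_ADDR_RESOLVED:
			/* Source bound, device acquired; resolve the path. */
			return rdma_resolve_route(id, 2000 /* ms */);
		case RDMA_CM_EVENT_ROUTE_RESOLVED:
			/* Path record ready; create the QP and connect. */
			return my_connect(id);
		case RDMA_CM_EVENT_ADDR_ERROR:
		case RDMA_CM_EVENT_ROUTE_ERROR:
			return my_fail(id, ev->status);
		default:
			return 0;
		}
	}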
a9bb7912 HS |
3659 | int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) |
3660 | { | |
3661 | struct rdma_id_private *id_priv; | |
3662 | unsigned long flags; | |
3663 | int ret; | |
3664 | ||
3665 | id_priv = container_of(id, struct rdma_id_private, id); | |
3666 | spin_lock_irqsave(&id_priv->lock, flags); | |
d490ee52 JG |
3667 | if ((reuse && id_priv->state != RDMA_CM_LISTEN) || |
3668 | id_priv->state == RDMA_CM_IDLE) { | |
a9bb7912 HS |
3669 | id_priv->reuseaddr = reuse; |
3670 | ret = 0; | |
3671 | } else { | |
3672 | ret = -EINVAL; | |
3673 | } | |
3674 | spin_unlock_irqrestore(&id_priv->lock, flags); | |
3675 | return ret; | |
3676 | } | |
3677 | EXPORT_SYMBOL(rdma_set_reuseaddr); | |
3678 | ||
68602120 SH |
3679 | int rdma_set_afonly(struct rdma_cm_id *id, int afonly) |
3680 | { | |
3681 | struct rdma_id_private *id_priv; | |
3682 | unsigned long flags; | |
3683 | int ret; | |
3684 | ||
3685 | id_priv = container_of(id, struct rdma_id_private, id); | |
3686 | spin_lock_irqsave(&id_priv->lock, flags); | |
3687 | if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { | |
3688 | id_priv->options |= (1 << CMA_OPTION_AFONLY); | |
3689 | id_priv->afonly = afonly; | |
3690 | ret = 0; | |
3691 | } else { | |
3692 | ret = -EINVAL; | |
3693 | } | |
3694 | spin_unlock_irqrestore(&id_priv->lock, flags); | |
3695 | return ret; | |
3696 | } | |
3697 | EXPORT_SYMBOL(rdma_set_afonly); | |
3698 | ||
e51060f0 SH |
3699 | static void cma_bind_port(struct rdma_bind_list *bind_list, |
3700 | struct rdma_id_private *id_priv) | |
3701 | { | |
58afdcb7 SH |
3702 | struct sockaddr *addr; |
3703 | struct sockaddr_ib *sib; | |
3704 | u64 sid, mask; | |
3705 | __be16 port; | |
e51060f0 | 3706 | |
730c8912 MZ |
3707 | lockdep_assert_held(&lock); |
3708 | ||
f4753834 | 3709 | addr = cma_src_addr(id_priv); |
58afdcb7 SH |
3710 | port = htons(bind_list->port); |
3711 | ||
3712 | switch (addr->sa_family) { | |
3713 | case AF_INET: | |
3714 | ((struct sockaddr_in *) addr)->sin_port = port; | |
3715 | break; | |
3716 | case AF_INET6: | |
3717 | ((struct sockaddr_in6 *) addr)->sin6_port = port; | |
3718 | break; | |
3719 | case AF_IB: | |
3720 | sib = (struct sockaddr_ib *) addr; | |
3721 | sid = be64_to_cpu(sib->sib_sid); | |
3722 | mask = be64_to_cpu(sib->sib_sid_mask); | |
3723 | sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port)); | |
3724 | sib->sib_sid_mask = cpu_to_be64(~0ULL); | |
3725 | break; | |
3726 | } | |
e51060f0 SH |
3727 | id_priv->bind_list = bind_list; |
3728 | hlist_add_head(&id_priv->node, &bind_list->owners); | |
3729 | } | |
3730 | ||
2253fc0c | 3731 | static int cma_alloc_port(enum rdma_ucm_port_space ps, |
aac978e1 | 3732 | struct rdma_id_private *id_priv, unsigned short snum) |
e51060f0 SH |
3733 | { |
3734 | struct rdma_bind_list *bind_list; | |
3b069c5d | 3735 | int ret; |
e51060f0 | 3736 | |
730c8912 MZ |
3737 | lockdep_assert_held(&lock); |
3738 | ||
cb164b8c | 3739 | bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); |
e51060f0 SH |
3740 | if (!bind_list) |
3741 | return -ENOMEM; | |
3742 | ||
fa20105e GS |
3743 | ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list, |
3744 | snum); | |
3b069c5d TH |
3745 | if (ret < 0) |
3746 | goto err; | |
aedec080 SH |
3747 | |
3748 | bind_list->ps = ps; | |
061ccb52 | 3749 | bind_list->port = snum; |
aedec080 SH |
3750 | cma_bind_port(bind_list, id_priv); |
3751 | return 0; | |
3b069c5d | 3752 | err: |
aedec080 | 3753 | kfree(bind_list); |
3b069c5d | 3754 | return ret == -ENOSPC ? -EADDRNOTAVAIL : ret; |
aedec080 | 3755 | } |
e51060f0 | 3756 | |
19b752a1 MS |
3757 | static int cma_port_is_unique(struct rdma_bind_list *bind_list, |
3758 | struct rdma_id_private *id_priv) | |
3759 | { | |
3760 | struct rdma_id_private *cur_id; | |
3761 | struct sockaddr *daddr = cma_dst_addr(id_priv); | |
3762 | struct sockaddr *saddr = cma_src_addr(id_priv); | |
3763 | __be16 dport = cma_port(daddr); | |
3764 | ||
730c8912 MZ |
3765 | lockdep_assert_held(&lock); |
3766 | ||
19b752a1 MS |
3767 | hlist_for_each_entry(cur_id, &bind_list->owners, node) { |
3768 | struct sockaddr *cur_daddr = cma_dst_addr(cur_id); | |
3769 | struct sockaddr *cur_saddr = cma_src_addr(cur_id); | |
3770 | __be16 cur_dport = cma_port(cur_daddr); | |
3771 | ||
3772 | if (id_priv == cur_id) | |
3773 | continue; | |
3774 | ||
3775 | /* different dest port -> unique */ | |
9dea9a2f TN |
3776 | if (!cma_any_port(daddr) && |
3777 | !cma_any_port(cur_daddr) && | |
19b752a1 MS |
3778 | (dport != cur_dport)) |
3779 | continue; | |
3780 | ||
3781 | /* different src address -> unique */ | |
3782 | if (!cma_any_addr(saddr) && | |
3783 | !cma_any_addr(cur_saddr) && | |
3784 | cma_addr_cmp(saddr, cur_saddr)) | |
3785 | continue; | |
3786 | ||
3787 | /* different dst address -> unique */ | |
9dea9a2f TN |
3788 | if (!cma_any_addr(daddr) && |
3789 | !cma_any_addr(cur_daddr) && | |
19b752a1 MS |
3790 | cma_addr_cmp(daddr, cur_daddr)) |
3791 | continue; | |
3792 | ||
3793 | return -EADDRNOTAVAIL; | |
3794 | } | |
3795 | return 0; | |
3796 | } | |
3797 | ||
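/*
 * Pick an ephemeral port: start from a random rover inside the netns's
 * local port range and walk it with wraparound until a free (or
 * shareable) port is found. The last port handed out is remembered so
 * it is not reused immediately after being closed.
 */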
2253fc0c | 3798 | static int cma_alloc_any_port(enum rdma_ucm_port_space ps, |
aac978e1 | 3799 | struct rdma_id_private *id_priv) |
aedec080 | 3800 | { |
5d7220e8 TH |
3801 | static unsigned int last_used_port; |
3802 | int low, high, remaining; | |
3803 | unsigned int rover; | |
fa20105e | 3804 | struct net *net = id_priv->id.route.addr.dev_addr.net; |
e51060f0 | 3805 | |
730c8912 MZ |
3806 | lockdep_assert_held(&lock); |
3807 | ||
fa20105e | 3808 | inet_get_local_port_range(net, &low, &high); |
5d7220e8 | 3809 | remaining = (high - low) + 1; |
8032bf12 | 3810 | rover = get_random_u32_below(remaining) + low; |
5d7220e8 | 3811 | retry: |
19b752a1 MS |
3812 | if (last_used_port != rover) { |
3813 | struct rdma_bind_list *bind_list; | |
3814 | int ret; | |
3815 | ||
3816 | bind_list = cma_ps_find(net, ps, (unsigned short)rover); | |
3817 | ||
3818 | if (!bind_list) { | |
3819 | ret = cma_alloc_port(ps, id_priv, rover); | |
3820 | } else { | |
3821 | ret = cma_port_is_unique(bind_list, id_priv); | |
3822 | if (!ret) | |
3823 | cma_bind_port(bind_list, id_priv); | |
3824 | } | |
5d7220e8 TH |
3825 | /* |
3826 | * Remember the previously used port number in order to avoid | |
3827 | * re-using the same port immediately after it is closed. | |
3828 | */ | |
3829 | if (!ret) | |
3830 | last_used_port = rover; | |
3831 | if (ret != -EADDRNOTAVAIL) | |
3832 | return ret; | |
e51060f0 | 3833 | } |
5d7220e8 TH |
3834 | if (--remaining) { |
3835 | rover++; | |
3836 | if ((rover < low) || (rover > high)) | |
3837 | rover = low; | |
3838 | goto retry; | |
3839 | } | |
3840 | return -EADDRNOTAVAIL; | |
e51060f0 SH |
3841 | } |
3842 | ||
a9bb7912 HS |
3843 | /* |
3844 | * Check that the requested port is available. This is called when trying to | |
3845 | * bind to a specific port, or when trying to listen on a bound port. In | |
3846 | * the latter case, the provided id_priv may already be on the bind_list, but | |
3847 | * we still need to check that it's okay to start listening. | |
3848 | */ | |
3849 | static int cma_check_port(struct rdma_bind_list *bind_list, | |
3850 | struct rdma_id_private *id_priv, uint8_t reuseaddr) | |
e51060f0 SH |
3851 | { |
3852 | struct rdma_id_private *cur_id; | |
43b752da | 3853 | struct sockaddr *addr, *cur_addr; |
e51060f0 | 3854 | |
730c8912 MZ |
3855 | lockdep_assert_held(&lock); |
3856 | ||
f4753834 | 3857 | addr = cma_src_addr(id_priv); |
b67bfe0d | 3858 | hlist_for_each_entry(cur_id, &bind_list->owners, node) { |
a9bb7912 HS |
3859 | if (id_priv == cur_id) |
3860 | continue; | |
3cd96564 | 3861 | |
d490ee52 | 3862 | if (reuseaddr && cur_id->reuseaddr) |
5b0ec991 | 3863 | continue; |
e51060f0 | 3864 | |
f4753834 | 3865 | cur_addr = cma_src_addr(cur_id); |
5b0ec991 SH |
3866 | if (id_priv->afonly && cur_id->afonly && |
3867 | (addr->sa_family != cur_addr->sa_family)) | |
3868 | continue; | |
3869 | ||
3870 | if (cma_any_addr(addr) || cma_any_addr(cur_addr)) | |
3871 | return -EADDRNOTAVAIL; | |
3872 | ||
3873 | if (!cma_addr_cmp(addr, cur_addr)) | |
3874 | return -EADDRINUSE; | |
a9bb7912 | 3875 | } |
e51060f0 SH |
3876 | return 0; |
3877 | } | |
3878 | ||
2253fc0c | 3879 | static int cma_use_port(enum rdma_ucm_port_space ps, |
aac978e1 | 3880 | struct rdma_id_private *id_priv) |
a9bb7912 HS |
3881 | { |
3882 | struct rdma_bind_list *bind_list; | |
3883 | unsigned short snum; | |
3884 | int ret; | |
3885 | ||
730c8912 MZ |
3886 | lockdep_assert_held(&lock); |
3887 | ||
f4753834 | 3888 | snum = ntohs(cma_port(cma_src_addr(id_priv))); |
a9bb7912 HS |
3889 | if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) |
3890 | return -EACCES; | |
3891 | ||
fa20105e | 3892 | bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum); |
a9bb7912 HS |
3893 | if (!bind_list) { |
3894 | ret = cma_alloc_port(ps, id_priv, snum); | |
3895 | } else { | |
3896 | ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); | |
3897 | if (!ret) | |
3898 | cma_bind_port(bind_list, id_priv); | |
3899 | } | |
3900 | return ret; | |
3901 | } | |
3902 | ||
2253fc0c SW |
3903 | static enum rdma_ucm_port_space |
3904 | cma_select_inet_ps(struct rdma_id_private *id_priv) | |
e51060f0 | 3905 | { |
e51060f0 | 3906 | switch (id_priv->id.ps) { |
e51060f0 | 3907 | case RDMA_PS_TCP: |
628e5f6d | 3908 | case RDMA_PS_UDP: |
c8f6a362 | 3909 | case RDMA_PS_IPOIB: |
2d2e9415 | 3910 | case RDMA_PS_IB: |
aac978e1 | 3911 | return id_priv->id.ps; |
e51060f0 | 3912 | default: |
aac978e1 HE |
3913 | |
3914 | return 0; | |
58afdcb7 SH |
3915 | } |
3916 | } | |
3917 | ||
2253fc0c SW |
3918 | static enum rdma_ucm_port_space |
3919 | cma_select_ib_ps(struct rdma_id_private *id_priv) | |
58afdcb7 | 3920 | { |
2253fc0c | 3921 | enum rdma_ucm_port_space ps = 0; |
58afdcb7 SH |
3922 | struct sockaddr_ib *sib; |
3923 | u64 sid_ps, mask, sid; | |
3924 | ||
f4753834 | 3925 | sib = (struct sockaddr_ib *) cma_src_addr(id_priv); |
58afdcb7 SH |
3926 | mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK; |
3927 | sid = be64_to_cpu(sib->sib_sid) & mask; | |
3928 | ||
3929 | if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) { | |
3930 | sid_ps = RDMA_IB_IP_PS_IB; | |
aac978e1 | 3931 | ps = RDMA_PS_IB; |
58afdcb7 SH |
3932 | } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) && |
3933 | (sid == (RDMA_IB_IP_PS_TCP & mask))) { | |
3934 | sid_ps = RDMA_IB_IP_PS_TCP; | |
aac978e1 | 3935 | ps = RDMA_PS_TCP; |
58afdcb7 SH |
3936 | } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) && |
3937 | (sid == (RDMA_IB_IP_PS_UDP & mask))) { | |
3938 | sid_ps = RDMA_IB_IP_PS_UDP; | |
aac978e1 | 3939 | ps = RDMA_PS_UDP; |
e51060f0 SH |
3940 | } |
3941 | ||
58afdcb7 SH |
3942 | if (ps) { |
3943 | sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib))); | |
3944 | sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK | | |
3945 | be64_to_cpu(sib->sib_sid_mask)); | |
3946 | } | |
3947 | return ps; | |
3948 | } | |
3949 | ||
3950 | static int cma_get_port(struct rdma_id_private *id_priv) | |
3951 | { | |
2253fc0c | 3952 | enum rdma_ucm_port_space ps; |
58afdcb7 SH |
3953 | int ret; |
3954 | ||
f4753834 | 3955 | if (cma_family(id_priv) != AF_IB) |
58afdcb7 SH |
3956 | ps = cma_select_inet_ps(id_priv); |
3957 | else | |
3958 | ps = cma_select_ib_ps(id_priv); | |
3959 | if (!ps) | |
3960 | return -EPROTONOSUPPORT; | |
3961 | ||
e51060f0 | 3962 | mutex_lock(&lock); |
f4753834 | 3963 | if (cma_any_port(cma_src_addr(id_priv))) |
aedec080 | 3964 | ret = cma_alloc_any_port(ps, id_priv); |
e51060f0 SH |
3965 | else |
3966 | ret = cma_use_port(ps, id_priv); | |
3967 | mutex_unlock(&lock); | |
3968 | ||
3969 | return ret; | |
3970 | } | |
3971 | ||
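/*
 * An IPv6 link-local address is only meaningful together with a scope
 * id: require one and record it as the bound interface.
 */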
d14714df SH |
3972 | static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, |
3973 | struct sockaddr *addr) | |
3974 | { | |
d90f9b35 | 3975 | #if IS_ENABLED(CONFIG_IPV6) |
d14714df SH |
3976 | struct sockaddr_in6 *sin6; |
3977 | ||
3978 | if (addr->sa_family != AF_INET6) | |
3979 | return 0; | |
3980 | ||
3981 | sin6 = (struct sockaddr_in6 *) addr; | |
5462eddd SK |
3982 | |
3983 | if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)) | |
3984 | return 0; | |
3985 | ||
3986 | if (!sin6->sin6_scope_id) | |
d14714df SH |
3987 | return -EINVAL; |
3988 | ||
3989 | dev_addr->bound_dev_if = sin6->sin6_scope_id; | |
3990 | #endif | |
3991 | return 0; | |
3992 | } | |
3993 | ||
a9bb7912 HS |
3994 | int rdma_listen(struct rdma_cm_id *id, int backlog) |
3995 | { | |
732d41c5 JG |
3996 | struct rdma_id_private *id_priv = |
3997 | container_of(id, struct rdma_id_private, id); | |
a9bb7912 HS |
3998 | int ret; |
3999 | ||
732d41c5 | 4000 | if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) { |
bc0bdc5a JG |
4001 | struct sockaddr_in any_in = { |
4002 | .sin_family = AF_INET, | |
4003 | .sin_addr.s_addr = htonl(INADDR_ANY), | |
4004 | }; | |
4005 | ||
732d41c5 | 4006 | /* For a well behaved ULP state will be RDMA_CM_IDLE */ |
bc0bdc5a | 4007 | ret = rdma_bind_addr(id, (struct sockaddr *)&any_in); |
a9bb7912 HS |
4008 | if (ret) |
4009 | return ret; | |
732d41c5 JG |
4010 | if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, |
4011 | RDMA_CM_LISTEN))) | |
4012 | return -EINVAL; | |
a9bb7912 HS |
4013 | } |
4014 | ||
d490ee52 JG |
4015 | /* |
4016 | * Once the ID reaches RDMA_CM_LISTEN it is not allowed to be reusable | |
4017 | * any more, and has to be unique in the bind list. | |
4018 | */ | |
a9bb7912 | 4019 | if (id_priv->reuseaddr) { |
d490ee52 JG |
4020 | mutex_lock(&lock); |
4021 | ret = cma_check_port(id_priv->bind_list, id_priv, 0); | |
4022 | if (!ret) | |
4023 | id_priv->reuseaddr = 0; | |
4024 | mutex_unlock(&lock); | |
a9bb7912 HS |
4025 | if (ret) |
4026 | goto err; | |
4027 | } | |
4028 | ||
4029 | id_priv->backlog = backlog; | |
889d916b | 4030 | if (id_priv->cma_dev) { |
72219cea | 4031 | if (rdma_cap_ib_cm(id->device, 1)) { |
a9bb7912 HS |
4032 | ret = cma_ib_listen(id_priv); |
4033 | if (ret) | |
4034 | goto err; | |
04215330 | 4035 | } else if (rdma_cap_iw_cm(id->device, 1)) { |
a9bb7912 HS |
4036 | ret = cma_iw_listen(id_priv, backlog); |
4037 | if (ret) | |
4038 | goto err; | |
21655afc | 4039 | } else { |
a9bb7912 HS |
4040 | ret = -ENOSYS; |
4041 | goto err; | |
4042 | } | |
c80a0c52 LR |
4043 | } else { |
4044 | ret = cma_listen_on_all(id_priv); | |
4045 | if (ret) | |
4046 | goto err; | |
4047 | } | |
a9bb7912 HS |
4048 | |
4049 | return 0; | |
4050 | err: | |
4051 | id_priv->backlog = 0; | |
d490ee52 JG |
4052 | /* |
4053 | * All the failure paths that lead here will not allow the req_handlers | |
4054 | * to have run. | |
4055 | */ | |
550e5ca7 | 4056 | cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); |
a9bb7912 HS |
4057 | return ret; |
4058 | } | |
4059 | EXPORT_SYMBOL(rdma_listen); | |
4060 | ||
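Illustrative only, a sketch under assumptions rather than part of cma.c: a minimal passive side binds a wildcard address and listens. MY_PORT is a hypothetical service port and my_cma_handler() a handler like the one sketched after rdma_resolve_addr() above; connection requests then arrive as RDMA_CM_EVENT_CONNECT_REQUEST events.

	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
		.sin_port = htons(MY_PORT),
	};
	struct rdma_cm_id *listen_id;
	int ret;

	listen_id = rdma_create_id(&init_net, my_cma_handler, NULL,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(listen_id))
		return PTR_ERR(listen_id);

	ret = rdma_bind_addr(listen_id, (struct sockaddr *)&sin);
	if (!ret)
		ret = rdma_listen(listen_id, 16 /* backlog */);
	if (ret)
		rdma_destroy_id(listen_id);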
e51060f0 SH |
4061 | int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) |
4062 | { | |
4063 | struct rdma_id_private *id_priv; | |
4064 | int ret; | |
6df6b4a9 | 4065 | struct sockaddr *daddr; |
e51060f0 | 4066 | |
680f920a SH |
4067 | if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 && |
4068 | addr->sa_family != AF_IB) | |
e51060f0 SH |
4069 | return -EAFNOSUPPORT; |
4070 | ||
4071 | id_priv = container_of(id, struct rdma_id_private, id); | |
550e5ca7 | 4072 | if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) |
e51060f0 SH |
4073 | return -EINVAL; |
4074 | ||
d14714df SH |
4075 | ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); |
4076 | if (ret) | |
4077 | goto err1; | |
4078 | ||
7b85627b | 4079 | memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); |
8523c048 | 4080 | if (!cma_any_addr(addr)) { |
680f920a | 4081 | ret = cma_translate_addr(addr, &id->route.addr.dev_addr); |
e51060f0 | 4082 | if (ret) |
255d0c14 KK |
4083 | goto err1; |
4084 | ||
ff11c6cd | 4085 | ret = cma_acquire_dev_by_src_ip(id_priv); |
255d0c14 KK |
4086 | if (ret) |
4087 | goto err1; | |
e51060f0 SH |
4088 | } |
4089 | ||
68602120 SH |
4090 | if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { |
4091 | if (addr->sa_family == AF_INET) | |
4092 | id_priv->afonly = 1; | |
5b0ec991 | 4093 | #if IS_ENABLED(CONFIG_IPV6) |
fa20105e GS |
4094 | else if (addr->sa_family == AF_INET6) { |
4095 | struct net *net = id_priv->id.route.addr.dev_addr.net; | |
4096 | ||
4097 | id_priv->afonly = net->ipv6.sysctl.bindv6only; | |
4098 | } | |
5b0ec991 | 4099 | #endif |
68602120 | 4100 | } |
9dea9a2f TN |
4101 | daddr = cma_dst_addr(id_priv); |
4102 | daddr->sa_family = addr->sa_family; | |
4103 | ||
e51060f0 SH |
4104 | ret = cma_get_port(id_priv); |
4105 | if (ret) | |
255d0c14 | 4106 | goto err2; |
e51060f0 | 4107 | |
cb5cd0ea SD |
4108 | if (!cma_any_addr(addr)) |
4109 | rdma_restrack_add(&id_priv->res); | |
e51060f0 | 4110 | return 0; |
255d0c14 | 4111 | err2: |
ed7a01fd | 4112 | if (id_priv->cma_dev) |
a396d43a | 4113 | cma_release_dev(id_priv); |
255d0c14 | 4114 | err1: |
550e5ca7 | 4115 | cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); |
e51060f0 SH |
4116 | return ret; |
4117 | } | |
4118 | EXPORT_SYMBOL(rdma_bind_addr); | |
4119 | ||
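/*
 * Write the CMA header (version, IP version, addresses and port) at
 * the front of the CM private data; the ULP's own private data follows
 * at cma_user_data_offset().
 */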
f4753834 | 4120 | static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv) |
e51060f0 | 4121 | { |
e51060f0 | 4122 | struct cma_hdr *cma_hdr; |
e51060f0 | 4123 | |
01602f11 SH |
4124 | cma_hdr = hdr; |
4125 | cma_hdr->cma_version = CMA_VERSION; | |
f4753834 | 4126 | if (cma_family(id_priv) == AF_INET) { |
1f5175ad AS |
4127 | struct sockaddr_in *src4, *dst4; |
4128 | ||
f4753834 SH |
4129 | src4 = (struct sockaddr_in *) cma_src_addr(id_priv); |
4130 | dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv); | |
1f5175ad | 4131 | |
01602f11 SH |
4132 | cma_set_ip_ver(cma_hdr, 4); |
4133 | cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; | |
4134 | cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; | |
4135 | cma_hdr->port = src4->sin_port; | |
e8160e15 | 4136 | } else if (cma_family(id_priv) == AF_INET6) { |
1f5175ad AS |
4137 | struct sockaddr_in6 *src6, *dst6; |
4138 | ||
f4753834 SH |
4139 | src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); |
4140 | dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv); | |
1f5175ad | 4141 | |
01602f11 SH |
4142 | cma_set_ip_ver(cma_hdr, 6); |
4143 | cma_hdr->src_addr.ip6 = src6->sin6_addr; | |
4144 | cma_hdr->dst_addr.ip6 = dst6->sin6_addr; | |
4145 | cma_hdr->port = src6->sin6_port; | |
e51060f0 SH |
4146 | } |
4147 | return 0; | |
4148 | } | |
4149 | ||
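/*
 * Completion handler for a Service ID Resolution (SIDR) exchange, the
 * UD analogue of a connection: a successful reply carries the peer's
 * QPN and Q_Key and is reported up as RDMA_CM_EVENT_ESTABLISHED.
 */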
628e5f6d | 4150 | static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, |
e7ff98ae | 4151 | const struct ib_cm_event *ib_event) |
628e5f6d SH |
4152 | { |
4153 | struct rdma_id_private *id_priv = cm_id->context; | |
7582df82 | 4154 | struct rdma_cm_event event = {}; |
e7ff98ae PP |
4155 | const struct ib_cm_sidr_rep_event_param *rep = |
4156 | &ib_event->param.sidr_rep_rcvd; | |
f6a9d47a | 4157 | int ret; |
628e5f6d | 4158 | |
37e07cda | 4159 | mutex_lock(&id_priv->handler_mutex); |
2a7cec53 | 4160 | if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) |
37e07cda | 4161 | goto out; |
628e5f6d SH |
4162 | |
4163 | switch (ib_event->event) { | |
4164 | case IB_CM_SIDR_REQ_ERROR: | |
4165 | event.event = RDMA_CM_EVENT_UNREACHABLE; | |
4166 | event.status = -ETIMEDOUT; | |
4167 | break; | |
4168 | case IB_CM_SIDR_REP_RECEIVED: | |
4169 | event.param.ud.private_data = ib_event->private_data; | |
4170 | event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; | |
4171 | if (rep->status != IB_SIDR_SUCCESS) { | |
4172 | event.event = RDMA_CM_EVENT_UNREACHABLE; | |
4173 | event.status = ib_event->param.sidr_rep_rcvd.status; | |
498683c6 MS |
4174 | pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n", |
4175 | event.status); | |
628e5f6d SH |
4176 | break; |
4177 | } | |
5c438135 | 4178 | ret = cma_set_qkey(id_priv, rep->qkey); |
d2ca39f2 | 4179 | if (ret) { |
498683c6 | 4180 | pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. status %d\n", ret); |
d2ca39f2 | 4181 | event.event = RDMA_CM_EVENT_ADDR_ERROR; |
5c438135 | 4182 | event.status = ret; |
628e5f6d SH |
4183 | break; |
4184 | } | |
4ad6a024 PP |
4185 | ib_init_ah_attr_from_path(id_priv->id.device, |
4186 | id_priv->id.port_num, | |
4187 | id_priv->id.route.path_rec, | |
39839107 PP |
4188 | &event.param.ud.ah_attr, |
4189 | rep->sgid_attr); | |
628e5f6d SH |
4190 | event.param.ud.qp_num = rep->qpn; |
4191 | event.param.ud.qkey = rep->qkey; | |
4192 | event.event = RDMA_CM_EVENT_ESTABLISHED; | |
4193 | event.status = 0; | |
4194 | break; | |
4195 | default: | |
aba25a3e | 4196 | pr_err("RDMA CMA: unexpected IB CM event: %d\n", |
628e5f6d SH |
4197 | ib_event->event); |
4198 | goto out; | |
4199 | } | |
4200 | ||
ed999f82 | 4201 | ret = cma_cm_event_handler(id_priv, &event); |
aa74f487 PP |
4202 | |
4203 | rdma_destroy_ah_attr(&event.param.ud.ah_attr); | |
628e5f6d SH |
4204 | if (ret) { |
4205 | /* Destroy the CM ID by returning a non-zero value. */ | |
4206 | id_priv->cm_id.ib = NULL; | |
f6a9d47a | 4207 | destroy_id_handler_unlock(id_priv); |
628e5f6d SH |
4208 | return ret; |
4209 | } | |
4210 | out: | |
de910bd9 | 4211 | mutex_unlock(&id_priv->handler_mutex); |
f6a9d47a | 4212 | return 0; |
628e5f6d SH |
4213 | } |
4214 | ||
4215 | static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, | |
4216 | struct rdma_conn_param *conn_param) | |
4217 | { | |
4218 | struct ib_cm_sidr_req_param req; | |
0c9361fc | 4219 | struct ib_cm_id *id; |
e511d1ae | 4220 | void *private_data; |
c0b64f58 BVA |
4221 | u8 offset; |
4222 | int ret; | |
628e5f6d | 4223 | |
e511d1ae | 4224 | memset(&req, 0, sizeof req); |
e8160e15 | 4225 | offset = cma_user_data_offset(id_priv); |
8d0d2b0f | 4226 | if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) |
04ded167 SH |
4227 | return -EINVAL; |
4228 | ||
e8160e15 | 4229 | if (req.private_data_len) { |
e511d1ae SH |
4230 | private_data = kzalloc(req.private_data_len, GFP_ATOMIC); |
4231 | if (!private_data) | |
e8160e15 SH |
4232 | return -ENOMEM; |
4233 | } else { | |
e511d1ae | 4234 | private_data = NULL; |
e8160e15 | 4235 | } |
628e5f6d SH |
4236 | |
4237 | if (conn_param->private_data && conn_param->private_data_len) | |
e511d1ae SH |
4238 | memcpy(private_data + offset, conn_param->private_data, |
4239 | conn_param->private_data_len); | |
628e5f6d | 4240 | |
e511d1ae SH |
4241 | if (private_data) { |
4242 | ret = cma_format_hdr(private_data, id_priv); | |
e8160e15 SH |
4243 | if (ret) |
4244 | goto out; | |
e511d1ae | 4245 | req.private_data = private_data; |
e8160e15 | 4246 | } |
628e5f6d | 4247 | |
0c9361fc JM |
4248 | id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, |
4249 | id_priv); | |
4250 | if (IS_ERR(id)) { | |
4251 | ret = PTR_ERR(id); | |
628e5f6d SH |
4252 | goto out; |
4253 | } | |
0c9361fc | 4254 | id_priv->cm_id.ib = id; |
628e5f6d | 4255 | |
f4753834 | 4256 | req.path = id_priv->id.route.path_rec; |
815d456e | 4257 | req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; |
cf53936f | 4258 | req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); |
628e5f6d SH |
4259 | req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); |
4260 | req.max_cm_retries = CMA_MAX_CM_RETRIES; | |
4261 | ||
ed999f82 | 4262 | trace_cm_send_sidr_req(id_priv); |
628e5f6d SH |
4263 | ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); |
4264 | if (ret) { | |
4265 | ib_destroy_cm_id(id_priv->cm_id.ib); | |
4266 | id_priv->cm_id.ib = NULL; | |
4267 | } | |
4268 | out: | |
e511d1ae | 4269 | kfree(private_data); |
628e5f6d SH |
4270 | return ret; |
4271 | } | |
4272 | ||
e51060f0 SH |
4273 | static int cma_connect_ib(struct rdma_id_private *id_priv, |
4274 | struct rdma_conn_param *conn_param) | |
4275 | { | |
4276 | struct ib_cm_req_param req; | |
4277 | struct rdma_route *route; | |
4278 | void *private_data; | |
0c9361fc | 4279 | struct ib_cm_id *id; |
c0b64f58 BVA |
4280 | u8 offset; |
4281 | int ret; | |
e51060f0 SH |
4282 | |
4283 | memset(&req, 0, sizeof req); | |
e8160e15 | 4284 | offset = cma_user_data_offset(id_priv); |
8d0d2b0f | 4285 | if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) |
04ded167 SH |
4286 | return -EINVAL; |
4287 | ||
e8160e15 SH |
4288 | if (req.private_data_len) { |
4289 | private_data = kzalloc(req.private_data_len, GFP_ATOMIC); | |
4290 | if (!private_data) | |
4291 | return -ENOMEM; | |
4292 | } else { | |
4293 | private_data = NULL; | |
4294 | } | |
e51060f0 SH |
4295 | |
4296 | if (conn_param->private_data && conn_param->private_data_len) | |
4297 | memcpy(private_data + offset, conn_param->private_data, | |
4298 | conn_param->private_data_len); | |
4299 | ||
0c9361fc JM |
4300 | id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv); |
4301 | if (IS_ERR(id)) { | |
4302 | ret = PTR_ERR(id); | |
e51060f0 SH |
4303 | goto out; |
4304 | } | |
0c9361fc | 4305 | id_priv->cm_id.ib = id; |
e51060f0 SH |
4306 | |
4307 | route = &id_priv->id.route; | |
e8160e15 SH |
4308 | if (private_data) { |
4309 | ret = cma_format_hdr(private_data, id_priv); | |
4310 | if (ret) | |
4311 | goto out; | |
4312 | req.private_data = private_data; | |
4313 | } | |
e51060f0 SH |
4314 | |
4315 | req.primary_path = &route->path_rec[0]; | |
eb8336db MZ |
4316 | req.primary_path_inbound = route->path_rec_inbound; |
4317 | req.primary_path_outbound = route->path_rec_outbound; | |
bf9a9928 | 4318 | if (route->num_pri_alt_paths == 2) |
e51060f0 SH |
4319 | req.alternate_path = &route->path_rec[1]; |
4320 | ||
815d456e PP |
4321 | req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; |
4322 | /* Alternate path SGID attribute currently unsupported */ | |
cf53936f | 4323 | req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); |
e51060f0 | 4324 | req.qp_num = id_priv->qp_num; |
18c441a6 | 4325 | req.qp_type = id_priv->id.qp_type; |
e51060f0 SH |
4326 | req.starting_psn = id_priv->seq_num; |
4327 | req.responder_resources = conn_param->responder_resources; | |
4328 | req.initiator_depth = conn_param->initiator_depth; | |
4329 | req.flow_control = conn_param->flow_control; | |
4ede178a SH |
4330 | req.retry_count = min_t(u8, 7, conn_param->retry_count); |
4331 | req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); | |
e51060f0 SH |
4332 | req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; |
4333 | req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; | |
4334 | req.max_cm_retries = CMA_MAX_CM_RETRIES; | |
4335 | req.srq = id_priv->srq ? 1 : 0; | |
a20652e1 LR |
4336 | req.ece.vendor_id = id_priv->ece.vendor_id; |
4337 | req.ece.attr_mod = id_priv->ece.attr_mod; | |
e51060f0 | 4338 | |
ed999f82 | 4339 | trace_cm_send_req(id_priv); |
e51060f0 SH |
4340 | ret = ib_send_cm_req(id_priv->cm_id.ib, &req); |
4341 | out: | |
0c9361fc JM |
4342 | if (ret && !IS_ERR(id)) { |
4343 | ib_destroy_cm_id(id); | |
675a027c KK |
4344 | id_priv->cm_id.ib = NULL; |
4345 | } | |
4346 | ||
e51060f0 SH |
4347 | kfree(private_data); |
4348 | return ret; | |
4349 | } | |
4350 | ||
07ebafba TT |
4351 | static int cma_connect_iw(struct rdma_id_private *id_priv, |
4352 | struct rdma_conn_param *conn_param) | |
4353 | { | |
4354 | struct iw_cm_id *cm_id; | |
07ebafba TT |
4355 | int ret; |
4356 | struct iw_cm_conn_param iw_param; | |
4357 | ||
4358 | cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); | |
0c9361fc JM |
4359 | if (IS_ERR(cm_id)) |
4360 | return PTR_ERR(cm_id); | |
07ebafba | 4361 | |
ca0c448d | 4362 | mutex_lock(&id_priv->qp_mutex); |
68cdba06 | 4363 | cm_id->tos = id_priv->tos; |
926ba19b | 4364 | cm_id->tos_set = id_priv->tos_set; |
ca0c448d HB |
4365 | mutex_unlock(&id_priv->qp_mutex); |
4366 | ||
07ebafba TT |
4367 | id_priv->cm_id.iw = cm_id; |
4368 | ||
24d44a39 SW |
4369 | memcpy(&cm_id->local_addr, cma_src_addr(id_priv), |
4370 | rdma_addr_size(cma_src_addr(id_priv))); | |
4371 | memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv), | |
4372 | rdma_addr_size(cma_dst_addr(id_priv))); | |
07ebafba | 4373 | |
5851bb89 | 4374 | ret = cma_modify_qp_rtr(id_priv, conn_param); |
675a027c KK |
4375 | if (ret) |
4376 | goto out; | |
07ebafba | 4377 | |
f45ee80e HS |
4378 | if (conn_param) { |
4379 | iw_param.ord = conn_param->initiator_depth; | |
4380 | iw_param.ird = conn_param->responder_resources; | |
4381 | iw_param.private_data = conn_param->private_data; | |
4382 | iw_param.private_data_len = conn_param->private_data_len; | |
4383 | iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; | |
4384 | } else { | |
4385 | memset(&iw_param, 0, sizeof iw_param); | |
07ebafba | 4386 | iw_param.qpn = id_priv->qp_num; |
f45ee80e | 4387 | } |
07ebafba TT |
4388 | ret = iw_cm_connect(cm_id, &iw_param); |
4389 | out: | |
0c9361fc | 4390 | if (ret) { |
675a027c KK |
4391 | iw_destroy_cm_id(cm_id); |
4392 | id_priv->cm_id.iw = NULL; | |
4393 | } | |
07ebafba TT |
4394 | return ret; |
4395 | } | |
4396 | ||
071ba4cc JG |
4397 | /** |
4398 | * rdma_connect_locked - Initiate an active connection request. | |
4399 | * @id: Connection identifier to connect. | |
4400 | * @conn_param: Connection information used for connected QPs. | |
4401 | * | |
4402 | * Same as rdma_connect() but can only be called from the | |
4403 | * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback. | |
4404 | */ | |
4405 | int rdma_connect_locked(struct rdma_cm_id *id, | |
4406 | struct rdma_conn_param *conn_param) | |
e51060f0 | 4407 | { |
2a7cec53 JG |
4408 | struct rdma_id_private *id_priv = |
4409 | container_of(id, struct rdma_id_private, id); | |
e51060f0 SH |
4410 | int ret; |
4411 | ||
071ba4cc JG |
4412 | if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) |
4413 | return -EINVAL; | |
e51060f0 SH |
4414 | |
4415 | if (!id->qp) { | |
4416 | id_priv->qp_num = conn_param->qp_num; | |
e51060f0 SH |
4417 | id_priv->srq = conn_param->srq; |
4418 | } | |
4419 | ||
72219cea | 4420 | if (rdma_cap_ib_cm(id->device, id->port_num)) { |
b26f9b99 | 4421 | if (id->qp_type == IB_QPT_UD) |
628e5f6d SH |
4422 | ret = cma_resolve_ib_udp(id_priv, conn_param); |
4423 | else | |
4424 | ret = cma_connect_ib(id_priv, conn_param); | |
b6eb7011 | 4425 | } else if (rdma_cap_iw_cm(id->device, id->port_num)) { |
07ebafba | 4426 | ret = cma_connect_iw(id_priv, conn_param); |
b6eb7011 | 4427 | } else { |
e51060f0 | 4428 | ret = -ENOSYS; |
b6eb7011 | 4429 | } |
e51060f0 | 4430 | if (ret) |
2a7cec53 | 4431 | goto err_state; |
e51060f0 | 4432 | return 0; |
2a7cec53 | 4433 | err_state: |
550e5ca7 | 4434 | cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); |
071ba4cc JG |
4435 | return ret; |
4436 | } | |
4437 | EXPORT_SYMBOL(rdma_connect_locked); | |
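/*
 * Illustrative sketch, not part of cma.c (it relies only on headers this
 * file already includes): a minimal active-side event handler that
 * connects from the RDMA_CM_EVENT_ROUTE_RESOLVED callback, the one
 * context where handler_mutex is already held and rdma_connect_locked()
 * is the required entry point. The handler name and the depth/retry
 * values are hypothetical; the QP is assumed to have been created
 * earlier with rdma_create_qp().
 */
static int my_active_handler(struct rdma_cm_id *id,
                             struct rdma_cm_event *event)
{
        struct rdma_conn_param param = {};

        if (event->event != RDMA_CM_EVENT_ROUTE_RESOLVED)
                return 0;

        param.responder_resources = 1;
        param.initiator_depth = 1;
        param.retry_count = 7;          /* capped to 7 by cma_connect_ib() */
        param.rnr_retry_count = 7;

        /* A nonzero return tells the core to destroy this id. */
        return rdma_connect_locked(id, &param);
}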
4438 | ||
4439 | /** | |
4440 | * rdma_connect - Initiate an active connection request. | |
4441 | * @id: Connection identifier to connect. | |
4442 | * @conn_param: Connection information used for connected QPs. | |
4443 | * | |
4444 | * Users must have resolved a route to the destination address by calling |
4445 | * rdma_resolve_route() before calling this routine. |
4446 | * | |
4447 | * This call will either connect to a remote QP or obtain remote QP information | |
4448 | * for unconnected rdma_cm_ids. The actual operation is based on the |
4449 | * rdma_cm_id's port space. | |
4450 | */ | |
4451 | int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) | |
4452 | { | |
4453 | struct rdma_id_private *id_priv = | |
4454 | container_of(id, struct rdma_id_private, id); | |
4455 | int ret; | |
4456 | ||
4457 | mutex_lock(&id_priv->handler_mutex); | |
4458 | ret = rdma_connect_locked(id, conn_param); | |
2a7cec53 | 4459 | mutex_unlock(&id_priv->handler_mutex); |
e51060f0 SH |
4460 | return ret; |
4461 | } | |
4462 | EXPORT_SYMBOL(rdma_connect); | |
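/*
 * Illustrative sketch, not part of cma.c: rdma_connect() takes
 * handler_mutex itself, so it must be called from ordinary process
 * context, never from inside a cm event callback (use
 * rdma_connect_locked() there). struct my_conn and my_ulp_connect() are
 * hypothetical ULP names; address and route resolution are assumed done.
 */
struct my_conn {
        struct rdma_cm_id *cm_id;
};

static int my_ulp_connect(struct my_conn *conn)
{
        struct rdma_conn_param param = {
                .responder_resources = 1,
                .initiator_depth = 1,
                .retry_count = 7,
        };

        return rdma_connect(conn->cm_id, &param);
}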
4463 | ||
34e2ab57 LR |
4464 | /** |
4465 | * rdma_connect_ece - Initiate an active connection request with ECE data. | |
4466 | * @id: Connection identifier to connect. | |
4467 | * @conn_param: Connection information used for connected QPs. | |
4468 | * @ece: ECE parameters | |
4469 | * | |
4470 | * See the rdma_connect() description for details. |
4471 | */ | |
4472 | int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, | |
4473 | struct rdma_ucm_ece *ece) | |
4474 | { | |
4475 | struct rdma_id_private *id_priv = | |
4476 | container_of(id, struct rdma_id_private, id); | |
4477 | ||
4478 | id_priv->ece.vendor_id = ece->vendor_id; | |
4479 | id_priv->ece.attr_mod = ece->attr_mod; | |
4480 | ||
4481 | return rdma_connect(id, conn_param); | |
4482 | } | |
4483 | EXPORT_SYMBOL(rdma_connect_ece); | |
4484 | ||
e51060f0 SH |
4485 | static int cma_accept_ib(struct rdma_id_private *id_priv, |
4486 | struct rdma_conn_param *conn_param) | |
4487 | { | |
4488 | struct ib_cm_rep_param rep; | |
5851bb89 | 4489 | int ret; |
0fe313b0 | 4490 | |
5851bb89 SH |
4491 | ret = cma_modify_qp_rtr(id_priv, conn_param); |
4492 | if (ret) | |
4493 | goto out; | |
0fe313b0 | 4494 | |
5851bb89 SH |
4495 | ret = cma_modify_qp_rts(id_priv, conn_param); |
4496 | if (ret) | |
4497 | goto out; | |
e51060f0 SH |
4498 | |
4499 | memset(&rep, 0, sizeof rep); | |
4500 | rep.qp_num = id_priv->qp_num; | |
4501 | rep.starting_psn = id_priv->seq_num; | |
4502 | rep.private_data = conn_param->private_data; | |
4503 | rep.private_data_len = conn_param->private_data_len; | |
4504 | rep.responder_resources = conn_param->responder_resources; | |
4505 | rep.initiator_depth = conn_param->initiator_depth; | |
e51060f0 SH |
4506 | rep.failover_accepted = 0; |
4507 | rep.flow_control = conn_param->flow_control; | |
4ede178a | 4508 | rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); |
e51060f0 | 4509 | rep.srq = id_priv->srq ? 1 : 0; |
0cb15372 LR |
4510 | rep.ece.vendor_id = id_priv->ece.vendor_id; |
4511 | rep.ece.attr_mod = id_priv->ece.attr_mod; | |
e51060f0 | 4512 | |
ed999f82 | 4513 | trace_cm_send_rep(id_priv); |
0fe313b0 SH |
4514 | ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); |
4515 | out: | |
4516 | return ret; | |
e51060f0 SH |
4517 | } |
4518 | ||
07ebafba TT |
4519 | static int cma_accept_iw(struct rdma_id_private *id_priv, |
4520 | struct rdma_conn_param *conn_param) | |
4521 | { | |
4522 | struct iw_cm_conn_param iw_param; | |
4523 | int ret; | |
4524 | ||
f2625f7d SW |
4525 | if (!conn_param) |
4526 | return -EINVAL; | |
4527 | ||
5851bb89 | 4528 | ret = cma_modify_qp_rtr(id_priv, conn_param); |
07ebafba TT |
4529 | if (ret) |
4530 | return ret; | |
4531 | ||
4532 | iw_param.ord = conn_param->initiator_depth; | |
4533 | iw_param.ird = conn_param->responder_resources; | |
4534 | iw_param.private_data = conn_param->private_data; | |
4535 | iw_param.private_data_len = conn_param->private_data_len; | |
b6eb7011 | 4536 | if (id_priv->id.qp) |
07ebafba | 4537 | iw_param.qpn = id_priv->qp_num; |
b6eb7011 | 4538 | else |
07ebafba TT |
4539 | iw_param.qpn = conn_param->qp_num; |
4540 | ||
4541 | return iw_cm_accept(id_priv->cm_id.iw, &iw_param); | |
4542 | } | |
4543 | ||
628e5f6d | 4544 | static int cma_send_sidr_rep(struct rdma_id_private *id_priv, |
5c438135 | 4545 | enum ib_cm_sidr_status status, u32 qkey, |
628e5f6d SH |
4546 | const void *private_data, int private_data_len) |
4547 | { | |
4548 | struct ib_cm_sidr_rep_param rep; | |
d2ca39f2 | 4549 | int ret; |
628e5f6d SH |
4550 | |
4551 | memset(&rep, 0, sizeof rep); | |
4552 | rep.status = status; | |
4553 | if (status == IB_SIDR_SUCCESS) { | |
5c438135 | 4554 | ret = cma_set_qkey(id_priv, qkey); |
d2ca39f2 YE |
4555 | if (ret) |
4556 | return ret; | |
628e5f6d | 4557 | rep.qp_num = id_priv->qp_num; |
c8f6a362 | 4558 | rep.qkey = id_priv->qkey; |
0cb15372 LR |
4559 | |
4560 | rep.ece.vendor_id = id_priv->ece.vendor_id; | |
4561 | rep.ece.attr_mod = id_priv->ece.attr_mod; | |
628e5f6d | 4562 | } |
0cb15372 | 4563 | |
628e5f6d SH |
4564 | rep.private_data = private_data; |
4565 | rep.private_data_len = private_data_len; | |
4566 | ||
ed999f82 | 4567 | trace_cm_send_sidr_rep(id_priv); |
628e5f6d SH |
4568 | return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); |
4569 | } | |
4570 | ||
b09c4d70 LR |
4571 | /** |
4572 | * rdma_accept - Called to accept a connection request or response. | |
4573 | * @id: Connection identifier associated with the request. | |
4574 | * @conn_param: Information needed to establish the connection. This must be | |
4575 | * provided if accepting a connection request. If accepting a connection | |
4576 | * response, this parameter must be NULL. | |
4577 | * | |
4578 | * Typically, this routine is only called by the listener to accept a connection | |
4579 | * request. It must also be called on the active side of a connection if the | |
4580 | * user is performing their own QP transitions. | |
4581 | * | |
4582 | * In the case of error, a reject message is sent to the remote side and the |
4583 | * state of the QP associated with the id is moved to the error state, so that |
4584 | * any previously posted receive buffers are flushed. |
4585 | * | |
4586 | * This function is for use by kernel ULPs and must be called from under the | |
4587 | * handler callback. | |
4588 | */ | |
4589 | int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) | |
e51060f0 | 4590 | { |
d114c6fe JG |
4591 | struct rdma_id_private *id_priv = |
4592 | container_of(id, struct rdma_id_private, id); | |
e51060f0 SH |
4593 | int ret; |
4594 | ||
d114c6fe | 4595 | lockdep_assert_held(&id_priv->handler_mutex); |
83e9502d | 4596 | |
d114c6fe | 4597 | if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) |
e51060f0 SH |
4598 | return -EINVAL; |
4599 | ||
4600 | if (!id->qp && conn_param) { | |
4601 | id_priv->qp_num = conn_param->qp_num; | |
e51060f0 SH |
4602 | id_priv->srq = conn_param->srq; |
4603 | } | |
4604 | ||
72219cea | 4605 | if (rdma_cap_ib_cm(id->device, id->port_num)) { |
f45ee80e HS |
4606 | if (id->qp_type == IB_QPT_UD) { |
4607 | if (conn_param) | |
4608 | ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, | |
5c438135 | 4609 | conn_param->qkey, |
f45ee80e HS |
4610 | conn_param->private_data, |
4611 | conn_param->private_data_len); | |
4612 | else | |
4613 | ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, | |
5c438135 | 4614 | 0, NULL, 0); |
f45ee80e HS |
4615 | } else { |
4616 | if (conn_param) | |
4617 | ret = cma_accept_ib(id_priv, conn_param); | |
4618 | else | |
4619 | ret = cma_rep_recv(id_priv); | |
4620 | } | |
b6eb7011 | 4621 | } else if (rdma_cap_iw_cm(id->device, id->port_num)) { |
07ebafba | 4622 | ret = cma_accept_iw(id_priv, conn_param); |
b6eb7011 | 4623 | } else { |
e51060f0 | 4624 | ret = -ENOSYS; |
b6eb7011 | 4625 | } |
e51060f0 SH |
4626 | if (ret) |
4627 | goto reject; | |
4628 | ||
4629 | return 0; | |
4630 | reject: | |
c5483388 | 4631 | cma_modify_qp_err(id_priv); |
8094ba0a | 4632 | rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED); |
e51060f0 SH |
4633 | return ret; |
4634 | } | |
b09c4d70 | 4635 | EXPORT_SYMBOL(rdma_accept); |
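/*
 * Illustrative sketch, not part of cma.c: a passive-side handler that
 * accepts directly from the RDMA_CM_EVENT_CONNECT_REQUEST callback, the
 * context in which rdma_accept() asserts handler_mutex is held.
 * my_create_qp() is a hypothetical wrapper around rdma_create_qp().
 */
static int my_create_qp(struct rdma_cm_id *id);     /* hypothetical */

static int my_server_handler(struct rdma_cm_id *id,
                             struct rdma_cm_event *event)
{
        struct rdma_conn_param param = {};

        if (event->event != RDMA_CM_EVENT_CONNECT_REQUEST)
                return 0;

        if (my_create_qp(id))
                return -ENOMEM; /* nonzero: the core destroys the new id */

        /* Grant what the initiator asked for, within local limits. */
        param.responder_resources = event->param.conn.responder_resources;
        param.initiator_depth = event->param.conn.initiator_depth;

        /* On failure rdma_accept() has already sent the reject. */
        return rdma_accept(id, &param);
}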
e51060f0 | 4636 | |
b09c4d70 LR |
4637 | int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, |
4638 | struct rdma_ucm_ece *ece) | |
0cb15372 LR |
4639 | { |
4640 | struct rdma_id_private *id_priv = | |
4641 | container_of(id, struct rdma_id_private, id); | |
4642 | ||
4643 | id_priv->ece.vendor_id = ece->vendor_id; | |
4644 | id_priv->ece.attr_mod = ece->attr_mod; | |
4645 | ||
b09c4d70 | 4646 | return rdma_accept(id, conn_param); |
0cb15372 | 4647 | } |
b09c4d70 | 4648 | EXPORT_SYMBOL(rdma_accept_ece); |
0cb15372 | 4649 | |
d114c6fe JG |
4650 | void rdma_lock_handler(struct rdma_cm_id *id) |
4651 | { | |
4652 | struct rdma_id_private *id_priv = | |
4653 | container_of(id, struct rdma_id_private, id); | |
4654 | ||
4655 | mutex_lock(&id_priv->handler_mutex); | |
0cb15372 | 4656 | } |
d114c6fe JG |
4657 | EXPORT_SYMBOL(rdma_lock_handler); |
4658 | ||
4659 | void rdma_unlock_handler(struct rdma_cm_id *id) | |
4660 | { | |
4661 | struct rdma_id_private *id_priv = | |
4662 | container_of(id, struct rdma_id_private, id); | |
4663 | ||
4664 | mutex_unlock(&id_priv->handler_mutex); | |
4665 | } | |
4666 | EXPORT_SYMBOL(rdma_unlock_handler); | |
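/*
 * Illustrative sketch, not part of cma.c: when the accept decision is
 * deferred past the CONNECT_REQUEST callback (the handler returned 0 and
 * kept the id), the ULP must re-take the handler lock itself before
 * calling rdma_accept(). my_deferred_accept() is a hypothetical helper,
 * e.g. run from a workqueue.
 */
static void my_deferred_accept(struct rdma_cm_id *id,
                               struct rdma_conn_param *param)
{
        rdma_lock_handler(id);
        if (rdma_accept(id, param))
                pr_warn("deferred accept failed\n");
        rdma_unlock_handler(id);
}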
0cb15372 | 4667 | |
0fe313b0 SH |
4668 | int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) |
4669 | { | |
4670 | struct rdma_id_private *id_priv; | |
4671 | int ret; | |
4672 | ||
4673 | id_priv = container_of(id, struct rdma_id_private, id); | |
0c9361fc | 4674 | if (!id_priv->cm_id.ib) |
0fe313b0 SH |
4675 | return -EINVAL; |
4676 | ||
4677 | switch (id->device->node_type) { | |
4678 | case RDMA_NODE_IB_CA: | |
4679 | ret = ib_cm_notify(id_priv->cm_id.ib, event); | |
4680 | break; | |
4681 | default: | |
4682 | ret = 0; | |
4683 | break; | |
4684 | } | |
4685 | return ret; | |
4686 | } | |
4687 | EXPORT_SYMBOL(rdma_notify); | |
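/*
 * Illustrative sketch, not part of cma.c: rdma_notify() matters for ULPs
 * doing their own QP transitions. If data arrives before the RTU, the
 * device raises IB_EVENT_COMM_EST on the QP; forwarding it lets the IB
 * CM treat the connection as established and stop retransmitting the
 * REP. The handler below is a hypothetical ib_qp_init_attr event_handler
 * with the cm_id stored as the qp_context.
 */
static void my_qp_event_handler(struct ib_event *event, void *qp_context)
{
        struct rdma_cm_id *id = qp_context;

        if (event->event == IB_EVENT_COMM_EST)
                rdma_notify(id, IB_EVENT_COMM_EST);
}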
4688 | ||
e51060f0 | 4689 | int rdma_reject(struct rdma_cm_id *id, const void *private_data, |
8094ba0a | 4690 | u8 private_data_len, u8 reason) |
e51060f0 SH |
4691 | { |
4692 | struct rdma_id_private *id_priv; | |
4693 | int ret; | |
4694 | ||
4695 | id_priv = container_of(id, struct rdma_id_private, id); | |
0c9361fc | 4696 | if (!id_priv->cm_id.ib) |
e51060f0 SH |
4697 | return -EINVAL; |
4698 | ||
72219cea | 4699 | if (rdma_cap_ib_cm(id->device, id->port_num)) { |
ed999f82 | 4700 | if (id->qp_type == IB_QPT_UD) { |
5c438135 | 4701 | ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, |
628e5f6d | 4702 | private_data, private_data_len); |
ed999f82 CL |
4703 | } else { |
4704 | trace_cm_send_rej(id_priv); | |
8094ba0a LR |
4705 | ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0, |
4706 | private_data, private_data_len); | |
ed999f82 | 4707 | } |
04215330 | 4708 | } else if (rdma_cap_iw_cm(id->device, id->port_num)) { |
07ebafba TT |
4709 | ret = iw_cm_reject(id_priv->cm_id.iw, |
4710 | private_data, private_data_len); | |
b6eb7011 | 4711 | } else { |
e51060f0 | 4712 | ret = -ENOSYS; |
b6eb7011 | 4713 | } |
21655afc | 4714 | |
e51060f0 SH |
4715 | return ret; |
4716 | } | |
4717 | EXPORT_SYMBOL(rdma_reject); | |
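/*
 * Illustrative sketch, not part of cma.c: declining a connection request
 * from the CONNECT_REQUEST callback. For IB CM the reason and an optional
 * consumer payload travel in the REJ (iWarp ignores the reason); the
 * "busy" payload and handler name are hypothetical.
 */
static int my_decline_handler(struct rdma_cm_id *id,
                              struct rdma_cm_event *event)
{
        static const char busy[] = "busy";

        if (event->event != RDMA_CM_EVENT_CONNECT_REQUEST)
                return 0;

        rdma_reject(id, busy, sizeof(busy), IB_CM_REJ_CONSUMER_DEFINED);
        return -ECONNREFUSED;   /* nonzero: the core destroys the new id */
}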
4718 | ||
4719 | int rdma_disconnect(struct rdma_cm_id *id) | |
4720 | { | |
4721 | struct rdma_id_private *id_priv; | |
4722 | int ret; | |
4723 | ||
4724 | id_priv = container_of(id, struct rdma_id_private, id); | |
0c9361fc | 4725 | if (!id_priv->cm_id.ib) |
e51060f0 SH |
4726 | return -EINVAL; |
4727 | ||
72219cea | 4728 | if (rdma_cap_ib_cm(id->device, id->port_num)) { |
c5483388 | 4729 | ret = cma_modify_qp_err(id_priv); |
07ebafba TT |
4730 | if (ret) |
4731 | goto out; | |
e51060f0 | 4732 | /* Initiate or respond to a disconnect. */ |
ed999f82 CL |
4733 | trace_cm_disconnect(id_priv); |
4734 | if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) { | |
4735 | if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0)) | |
4736 | trace_cm_sent_drep(id_priv); | |
4737 | } else { | |
4738 | trace_cm_sent_dreq(id_priv); | |
4739 | } | |
04215330 | 4740 | } else if (rdma_cap_iw_cm(id->device, id->port_num)) { |
07ebafba | 4741 | ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); |
21655afc | 4742 | } else |
07ebafba | 4743 | ret = -EINVAL; |
21655afc | 4744 | |
e51060f0 SH |
4745 | out: |
4746 | return ret; | |
4747 | } | |
4748 | EXPORT_SYMBOL(rdma_disconnect); | |
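/*
 * Illustrative sketch, not part of cma.c: either side may initiate the
 * disconnect; the peer then sees RDMA_CM_EVENT_DISCONNECTED and typically
 * echoes rdma_disconnect(), which for IB answers the DREQ with a DREP as
 * in the code above. my_disc_handler() is a hypothetical handler branch.
 */
static int my_disc_handler(struct rdma_cm_id *id,
                           struct rdma_cm_event *event)
{
        if (event->event == RDMA_CM_EVENT_DISCONNECTED) {
                rdma_disconnect(id);    /* complete the DREQ/DREP handshake */
                /* then drain the QP and schedule rdma_destroy_id() */
        }
        return 0;
}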
4749 | ||
b5de0c60 JG |
4750 | static void cma_make_mc_event(int status, struct rdma_id_private *id_priv, |
4751 | struct ib_sa_multicast *multicast, | |
4752 | struct rdma_cm_event *event, | |
4753 | struct cma_multicast *mc) | |
c8f6a362 | 4754 | { |
b5de0c60 JG |
4755 | struct rdma_dev_addr *dev_addr; |
4756 | enum ib_gid_type gid_type; | |
4757 | struct net_device *ndev; | |
c8f6a362 | 4758 | |
5c438135 SH |
4759 | if (!status) |
4760 | status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); | |
498683c6 MS |
4761 | else |
4762 | pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n", | |
4763 | status); | |
b5de0c60 JG |
4764 | |
4765 | event->status = status; | |
4766 | event->param.ud.private_data = mc->context; | |
4767 | if (status) { | |
4768 | event->event = RDMA_CM_EVENT_MULTICAST_ERROR; | |
4769 | return; | |
498683c6 | 4770 | } |
c8f6a362 | 4771 | |
b5de0c60 JG |
4772 | dev_addr = &id_priv->id.route.addr.dev_addr; |
4773 | ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); | |
4774 | gid_type = | |
4775 | id_priv->cma_dev | |
4776 | ->default_gid_type[id_priv->id.port_num - | |
4777 | rdma_start_port( | |
4778 | id_priv->cma_dev->device)]; | |
4779 | ||
4780 | event->event = RDMA_CM_EVENT_MULTICAST_JOIN; | |
4781 | if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num, | |
4782 | &multicast->rec, ndev, gid_type, | |
4783 | &event->param.ud.ah_attr)) { | |
4784 | event->event = RDMA_CM_EVENT_MULTICAST_ERROR; | |
4785 | goto out; | |
4786 | } | |
6d337179 | 4787 | |
b5de0c60 JG |
4788 | event->param.ud.qp_num = 0xFFFFFF; |
4789 | event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey); | |
c8f6a362 | 4790 | |
b5de0c60 JG |
4791 | out: |
4792 | if (ndev) | |
4793 | dev_put(ndev); | |
4794 | } | |
f685c195 | 4795 | |
c8f6a362 SH |
4796 | static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) |
4797 | { | |
c8f6a362 | 4798 | struct cma_multicast *mc = multicast->context; |
b5de0c60 | 4799 | struct rdma_id_private *id_priv = mc->id_priv; |
7582df82 | 4800 | struct rdma_cm_event event = {}; |
37e07cda | 4801 | int ret = 0; |
c8f6a362 | 4802 | |
37e07cda | 4803 | mutex_lock(&id_priv->handler_mutex); |
5cfbf929 JG |
4804 | if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL || |
4805 | READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING) | |
37e07cda | 4806 | goto out; |
c8f6a362 | 4807 | |
b5de0c60 | 4808 | cma_make_mc_event(status, id_priv, multicast, &event, mc); |
ed999f82 | 4809 | ret = cma_cm_event_handler(id_priv, &event); |
f685c195 | 4810 | rdma_destroy_ah_attr(&event.param.ud.ah_attr); |
fe454dc3 | 4811 | WARN_ON(ret); |
8aa08602 | 4812 | |
37e07cda | 4813 | out: |
de910bd9 | 4814 | mutex_unlock(&id_priv->handler_mutex); |
c8f6a362 SH |
4815 | return 0; |
4816 | } | |
4817 | ||
4818 | static void cma_set_mgid(struct rdma_id_private *id_priv, | |
4819 | struct sockaddr *addr, union ib_gid *mgid) | |
4820 | { | |
4821 | unsigned char mc_map[MAX_ADDR_LEN]; | |
4822 | struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; | |
4823 | struct sockaddr_in *sin = (struct sockaddr_in *) addr; | |
4824 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr; | |
4825 | ||
4826 | if (cma_any_addr(addr)) { | |
4827 | memset(mgid, 0, sizeof *mgid); | |
4828 | } else if ((addr->sa_family == AF_INET6) && | |
1c9b2819 | 4829 | ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) == |
c8f6a362 SH |
4830 | 0xFF10A01B)) { |
4831 | /* IPv6 address is an SA-assigned MGID. */ |
4832 | memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); | |
5bc2b7b3 SH |
4833 | } else if (addr->sa_family == AF_IB) { |
4834 | memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid); | |
076dd53b | 4835 | } else if (addr->sa_family == AF_INET6) { |
e2e62697 JG |
4836 | ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map); |
4837 | if (id_priv->id.ps == RDMA_PS_UDP) | |
4838 | mc_map[7] = 0x01; /* Use RDMA CM signature */ | |
4839 | *mgid = *(union ib_gid *) (mc_map + 4); | |
c8f6a362 | 4840 | } else { |
a9e527e3 | 4841 | ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map); |
c8f6a362 SH |
4842 | if (id_priv->id.ps == RDMA_PS_UDP) |
4843 | mc_map[7] = 0x01; /* Use RDMA CM signature */ | |
c8f6a362 SH |
4844 | *mgid = *(union ib_gid *) (mc_map + 4); |
4845 | } | |
4846 | } | |
4847 | ||
4848 | static int cma_join_ib_multicast(struct rdma_id_private *id_priv, | |
4849 | struct cma_multicast *mc) | |
4850 | { | |
4851 | struct ib_sa_mcmember_rec rec; | |
4852 | struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; | |
4853 | ib_sa_comp_mask comp_mask; | |
4854 | int ret; | |
4855 | ||
4856 | ib_addr_get_mgid(dev_addr, &rec.mgid); | |
4857 | ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, | |
4858 | &rec.mgid, &rec); | |
4859 | if (ret) | |
4860 | return ret; | |
4861 | ||
5bc2b7b3 SH |
4862 | ret = cma_set_qkey(id_priv, 0); |
4863 | if (ret) | |
4864 | return ret; | |
4865 | ||
3f446754 | 4866 | cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); |
5bc2b7b3 | 4867 | rec.qkey = cpu_to_be32(id_priv->qkey); |
6f8372b6 | 4868 | rdma_addr_get_sgid(dev_addr, &rec.port_gid); |
c8f6a362 | 4869 | rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); |
ab15c95a AV |
4870 | rec.join_state = mc->join_state; |
4871 | ||
c8f6a362 SH |
4872 | comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | |
4873 | IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE | | |
4874 | IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL | | |
4875 | IB_SA_MCMEMBER_REC_FLOW_LABEL | | |
4876 | IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; | |
4877 | ||
84adeee9 YE |
4878 | if (id_priv->id.ps == RDMA_PS_IPOIB) |
4879 | comp_mask |= IB_SA_MCMEMBER_REC_RATE | | |
2a22fb8c DB |
4880 | IB_SA_MCMEMBER_REC_RATE_SELECTOR | |
4881 | IB_SA_MCMEMBER_REC_MTU_SELECTOR | | |
4882 | IB_SA_MCMEMBER_REC_MTU | | |
4883 | IB_SA_MCMEMBER_REC_HOP_LIMIT; | |
84adeee9 | 4884 | |
b5de0c60 JG |
4885 | mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device, |
4886 | id_priv->id.port_num, &rec, comp_mask, | |
4887 | GFP_KERNEL, cma_ib_mc_handler, mc); | |
4888 | return PTR_ERR_OR_ZERO(mc->sa_mc); | |
3c86aa70 EC |
4889 | } |
4890 | ||
be1d325a NO |
4891 | static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, |
4892 | enum ib_gid_type gid_type) | |
3c86aa70 EC |
4893 | { |
4894 | struct sockaddr_in *sin = (struct sockaddr_in *)addr; | |
4895 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr; | |
4896 | ||
4897 | if (cma_any_addr(addr)) { | |
4898 | memset(mgid, 0, sizeof *mgid); | |
4899 | } else if (addr->sa_family == AF_INET6) { | |
4900 | memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); | |
4901 | } else { | |
5c181bda PP |
4902 | mgid->raw[0] = |
4903 | (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff; | |
4904 | mgid->raw[1] = | |
4905 | (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0x0e; | |
3c86aa70 EC |
4906 | mgid->raw[2] = 0; |
4907 | mgid->raw[3] = 0; | |
4908 | mgid->raw[4] = 0; | |
4909 | mgid->raw[5] = 0; | |
4910 | mgid->raw[6] = 0; | |
4911 | mgid->raw[7] = 0; | |
4912 | mgid->raw[8] = 0; | |
4913 | mgid->raw[9] = 0; | |
4914 | mgid->raw[10] = 0xff; | |
4915 | mgid->raw[11] = 0xff; | |
4916 | *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr; | |
4917 | } | |
4918 | } | |
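/*
 * Illustrative note, not part of cma.c: for an IPv4 group address the
 * function above synthesizes the MGID
 *
 *      RoCE v1: ff0e::ffff:<ipv4>  (239.1.1.1 -> ff0e::ffff:ef01:0101)
 *      RoCE v2: ::ffff:<ipv4>      (the IPv4-mapped IPv6 form)
 *
 * A hypothetical self-check of that byte layout:
 */
static bool my_v4_mgid_ok(const union ib_gid *mgid, __be32 v4addr,
                          enum ib_gid_type gid_type)
{
        bool v2 = gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;

        return mgid->raw[0] == (v2 ? 0x00 : 0xff) &&
               mgid->raw[1] == (v2 ? 0x00 : 0x0e) &&
               mgid->raw[10] == 0xff && mgid->raw[11] == 0xff &&
               *(__be32 *)&mgid->raw[12] == v4addr;
}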
4919 | ||
4920 | static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, | |
4921 | struct cma_multicast *mc) | |
4922 | { | |
3c86aa70 | 4923 | struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; |
bee3c3c9 | 4924 | int err = 0; |
3c86aa70 EC |
4925 | struct sockaddr *addr = (struct sockaddr *)&mc->addr; |
4926 | struct net_device *ndev = NULL; | |
b5de0c60 | 4927 | struct ib_sa_multicast ib; |
bee3c3c9 | 4928 | enum ib_gid_type gid_type; |
ab15c95a AV |
4929 | bool send_only; |
4930 | ||
4931 | send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); | |
3c86aa70 | 4932 | |
b5de0c60 | 4933 | if (cma_zero_addr(addr)) |
3c86aa70 EC |
4934 | return -EINVAL; |
4935 | ||
be1d325a NO |
4936 | gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - |
4937 | rdma_start_port(id_priv->cma_dev->device)]; | |
b5de0c60 | 4938 | cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type); |
3c86aa70 | 4939 | |
b5de0c60 | 4940 | ib.rec.pkey = cpu_to_be16(0xffff); |
3c86aa70 | 4941 | if (id_priv->id.ps == RDMA_PS_UDP) |
b5de0c60 | 4942 | ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); |
3c86aa70 EC |
4943 | |
4944 | if (dev_addr->bound_dev_if) | |
052eac6e | 4945 | ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); |
fe454dc3 AH |
4946 | if (!ndev) |
4947 | return -ENODEV; | |
4948 | ||
b5de0c60 JG |
4949 | ib.rec.rate = iboe_get_rate(ndev); |
4950 | ib.rec.hop_limit = 1; | |
4951 | ib.rec.mtu = iboe_get_mtu(ndev->mtu); | |
bee3c3c9 | 4952 | |
bee3c3c9 | 4953 | if (addr->sa_family == AF_INET) { |
c65f6c5a | 4954 | if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { |
b5de0c60 | 4955 | ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; |
ab15c95a | 4956 | if (!send_only) { |
b5de0c60 | 4957 | err = cma_igmp_send(ndev, &ib.rec.mgid, |
ab15c95a | 4958 | true); |
ab15c95a | 4959 | } |
bee3c3c9 MS |
4960 | } |
4961 | } else { | |
4962 | if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) | |
4963 | err = -ENOTSUPP; | |
4964 | } | |
3c86aa70 | 4965 | dev_put(ndev); |
fe454dc3 AH |
4966 | if (err || !ib.rec.mtu) |
4967 | return err ?: -EINVAL; | |
4968 | ||
7b85627b | 4969 | rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, |
b5de0c60 | 4970 | &ib.rec.port_gid); |
fe454dc3 AH |
4971 | INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler); |
4972 | cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc); | |
4973 | queue_work(cma_wq, &mc->iboe_join.work); | |
3c86aa70 | 4974 | return 0; |
3c86aa70 EC |
4975 | } |
4976 | ||
c8f6a362 | 4977 | int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, |
ab15c95a | 4978 | u8 join_state, void *context) |
c8f6a362 | 4979 | { |
5cfbf929 JG |
4980 | struct rdma_id_private *id_priv = |
4981 | container_of(id, struct rdma_id_private, id); | |
c8f6a362 SH |
4982 | struct cma_multicast *mc; |
4983 | int ret; | |
4984 | ||
1bb5091d JG |
4985 | /* Not supported for kernel QPs */ |
4986 | if (WARN_ON(id->qp)) | |
7688f2c3 LR |
4987 | return -EINVAL; |
4988 | ||
5cfbf929 JG |
4989 | /* ULP is calling this wrong. */ |
4990 | if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND && | |
4991 | READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED)) | |
c8f6a362 SH |
4992 | return -EINVAL; |
4993 | ||
b5de0c60 | 4994 | mc = kzalloc(sizeof(*mc), GFP_KERNEL); |
c8f6a362 SH |
4995 | if (!mc) |
4996 | return -ENOMEM; | |
4997 | ||
ef560861 | 4998 | memcpy(&mc->addr, addr, rdma_addr_size(addr)); |
c8f6a362 SH |
4999 | mc->context = context; |
5000 | mc->id_priv = id_priv; | |
ab15c95a | 5001 | mc->join_state = join_state; |
c8f6a362 | 5002 | |
5d9fb044 | 5003 | if (rdma_protocol_roce(id->device, id->port_num)) { |
5c9a5282 | 5004 | ret = cma_iboe_join_multicast(id_priv, mc); |
c0126915 JG |
5005 | if (ret) |
5006 | goto out_err; | |
5007 | } else if (rdma_cap_ib_mcast(id->device, id->port_num)) { | |
5c9a5282 | 5008 | ret = cma_join_ib_multicast(id_priv, mc); |
c0126915 JG |
5009 | if (ret) |
5010 | goto out_err; | |
5011 | } else { | |
c8f6a362 | 5012 | ret = -ENOSYS; |
c0126915 | 5013 | goto out_err; |
c8f6a362 | 5014 | } |
c0126915 JG |
5015 | |
5016 | spin_lock(&id_priv->lock); | |
5017 | list_add(&mc->list, &id_priv->mc_list); | |
5018 | spin_unlock(&id_priv->lock); | |
5019 | ||
5020 | return 0; | |
5021 | out_err: | |
5022 | kfree(mc); | |
c8f6a362 SH |
5023 | return ret; |
5024 | } | |
5025 | EXPORT_SYMBOL(rdma_join_multicast); | |
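/*
 * Illustrative sketch, not part of cma.c: joining a UD multicast group on
 * a bound, QP-less id. The join completes asynchronously: the handler
 * later receives RDMA_CM_EVENT_MULTICAST_JOIN (or _ERROR) carrying the
 * AH attributes, QPN 0xFFFFFF and Q_Key needed to address the group, and
 * rdma_leave_multicast(id, group) undoes the join. my_mc_ctx is a
 * hypothetical cookie returned in event->param.ud.private_data.
 */
static int my_join_group(struct rdma_cm_id *id, struct sockaddr *group,
                         void *my_mc_ctx)
{
        /* BIT(SENDONLY_FULLMEMBER_JOIN) would request a send-only join. */
        return rdma_join_multicast(id, group, BIT(FULLMEMBER_JOIN),
                                   my_mc_ctx);
}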
5026 | ||
5027 | void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) | |
5028 | { | |
5029 | struct rdma_id_private *id_priv; | |
5030 | struct cma_multicast *mc; | |
5031 | ||
5032 | id_priv = container_of(id, struct rdma_id_private, id); | |
5033 | spin_lock_irq(&id_priv->lock); | |
5034 | list_for_each_entry(mc, &id_priv->mc_list, list) { | |
3788d299 JG |
5035 | if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0) |
5036 | continue; | |
5037 | list_del(&mc->list); | |
5038 | spin_unlock_irq(&id_priv->lock); | |
5c9a5282 | 5039 | |
3788d299 JG |
5040 | WARN_ON(id_priv->cma_dev->device != id->device); |
5041 | destroy_mc(id_priv, mc); | |
5042 | return; | |
c8f6a362 SH |
5043 | } |
5044 | spin_unlock_irq(&id_priv->lock); | |
5045 | } | |
5046 | EXPORT_SYMBOL(rdma_leave_multicast); | |
5047 | ||
dd5bdff8 OG |
5048 | static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv) |
5049 | { | |
5050 | struct rdma_dev_addr *dev_addr; | |
7e85bcda | 5051 | struct cma_work *work; |
dd5bdff8 OG |
5052 | |
5053 | dev_addr = &id_priv->id.route.addr.dev_addr; | |
5054 | ||
6266ed6e | 5055 | if ((dev_addr->bound_dev_if == ndev->ifindex) && |
fa20105e | 5056 | (net_eq(dev_net(ndev), dev_addr->net)) && |
dd5bdff8 | 5057 | memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) { |
aba25a3e PP |
5058 | pr_info("RDMA CM addr change for ndev %s used by id %p\n", |
5059 | ndev->name, &id_priv->id); | |
dd5bdff8 OG |
5060 | work = kzalloc(sizeof *work, GFP_KERNEL); |
5061 | if (!work) | |
5062 | return -ENOMEM; | |
5063 | ||
7e85bcda | 5064 | INIT_WORK(&work->work, cma_work_handler); |
dd5bdff8 OG |
5065 | work->id = id_priv; |
5066 | work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; | |
e368d23f | 5067 | cma_id_get(id_priv); |
dd5bdff8 OG |
5068 | queue_work(cma_wq, &work->work); |
5069 | } | |
5070 | ||
5071 | return 0; | |
5072 | } | |
5073 | ||
5074 | static int cma_netdev_callback(struct notifier_block *self, unsigned long event, | |
351638e7 | 5075 | void *ptr) |
dd5bdff8 | 5076 | { |
351638e7 | 5077 | struct net_device *ndev = netdev_notifier_info_to_dev(ptr); |
dd5bdff8 OG |
5078 | struct cma_device *cma_dev; |
5079 | struct rdma_id_private *id_priv; | |
5080 | int ret = NOTIFY_DONE; | |
5081 | ||
dd5bdff8 OG |
5082 | if (event != NETDEV_BONDING_FAILOVER) |
5083 | return NOTIFY_DONE; | |
5084 | ||
3cd96fdd | 5085 | if (!netif_is_bond_master(ndev)) |
dd5bdff8 OG |
5086 | return NOTIFY_DONE; |
5087 | ||
5088 | mutex_lock(&lock); | |
5089 | list_for_each_entry(cma_dev, &dev_list, list) | |
99cfddb8 | 5090 | list_for_each_entry(id_priv, &cma_dev->id_list, device_item) { |
dd5bdff8 OG |
5091 | ret = cma_netdev_change(ndev, id_priv); |
5092 | if (ret) | |
5093 | goto out; | |
5094 | } | |
5095 | ||
5096 | out: | |
5097 | mutex_unlock(&lock); | |
5098 | return ret; | |
5099 | } | |
5100 | ||
925d046e PH |
5101 | static void cma_netevent_work_handler(struct work_struct *_work) |
5102 | { | |
5103 | struct rdma_id_private *id_priv = | |
5104 | container_of(_work, struct rdma_id_private, id.net_work); | |
5105 | struct rdma_cm_event event = {}; | |
5106 | ||
5107 | mutex_lock(&id_priv->handler_mutex); | |
5108 | ||
5109 | if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || | |
5110 | READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) | |
5111 | goto out_unlock; | |
5112 | ||
5113 | event.event = RDMA_CM_EVENT_UNREACHABLE; | |
5114 | event.status = -ETIMEDOUT; | |
5115 | ||
5116 | if (cma_cm_event_handler(id_priv, &event)) { | |
5117 | __acquire(&id_priv->handler_mutex); | |
5118 | id_priv->cm_id.ib = NULL; | |
5119 | cma_id_put(id_priv); | |
5120 | destroy_id_handler_unlock(id_priv); | |
5121 | return; | |
5122 | } | |
5123 | ||
5124 | out_unlock: | |
5125 | mutex_unlock(&id_priv->handler_mutex); | |
5126 | cma_id_put(id_priv); | |
5127 | } | |
5128 | ||
5129 | static int cma_netevent_callback(struct notifier_block *self, | |
5130 | unsigned long event, void *ctx) | |
5131 | { | |
5132 | struct id_table_entry *ips_node = NULL; | |
5133 | struct rdma_id_private *current_id; | |
5134 | struct neighbour *neigh = ctx; | |
5135 | unsigned long flags; | |
5136 | ||
5137 | if (event != NETEVENT_NEIGH_UPDATE) | |
5138 | return NOTIFY_DONE; | |
5139 | ||
5140 | spin_lock_irqsave(&id_table_lock, flags); | |
5141 | if (neigh->tbl->family == AF_INET6) { | |
5142 | struct sockaddr_in6 neigh_sock_6; | |
5143 | ||
5144 | neigh_sock_6.sin6_family = AF_INET6; | |
5145 | neigh_sock_6.sin6_addr = *(struct in6_addr *)neigh->primary_key; | |
5146 | ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex, | |
5147 | (struct sockaddr *)&neigh_sock_6); | |
5148 | } else if (neigh->tbl->family == AF_INET) { | |
5149 | struct sockaddr_in neigh_sock_4; | |
5150 | ||
5151 | neigh_sock_4.sin_family = AF_INET; | |
5152 | neigh_sock_4.sin_addr.s_addr = *(__be32 *)(neigh->primary_key); | |
5153 | ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex, | |
5154 | (struct sockaddr *)&neigh_sock_4); | |
5155 | } else | |
5156 | goto out; | |
5157 | ||
5158 | if (!ips_node) | |
5159 | goto out; | |
5160 | ||
5161 | list_for_each_entry(current_id, &ips_node->id_list, id_list_entry) { | |
5162 | if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr, | |
5163 | neigh->ha, ETH_ALEN)) | |
5164 | continue; | |
5165 | INIT_WORK(¤t_id->id.net_work, cma_netevent_work_handler); | |
5166 | cma_id_get(current_id); | |
5167 | queue_work(cma_wq, ¤t_id->id.net_work); | |
5168 | } | |
5169 | out: | |
5170 | spin_unlock_irqrestore(&id_table_lock, flags); | |
5171 | return NOTIFY_DONE; | |
5172 | } | |
5173 | ||
dd5bdff8 OG |
5174 | static struct notifier_block cma_nb = { |
5175 | .notifier_call = cma_netdev_callback | |
5176 | }; | |
5177 | ||
925d046e PH |
5178 | static struct notifier_block cma_netevent_cb = { |
5179 | .notifier_call = cma_netevent_callback | |
5180 | }; | |
5181 | ||
3647a28d | 5182 | static void cma_send_device_removal_put(struct rdma_id_private *id_priv) |
e51060f0 | 5183 | { |
3647a28d | 5184 | struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL }; |
550e5ca7 | 5185 | enum rdma_cm_state state; |
3647a28d | 5186 | unsigned long flags; |
e51060f0 | 5187 | |
de910bd9 | 5188 | mutex_lock(&id_priv->handler_mutex); |
3647a28d JG |
5189 | /* Record that we want to remove the device */ |
5190 | spin_lock_irqsave(&id_priv->lock, flags); | |
5191 | state = id_priv->state; | |
5192 | if (state == RDMA_CM_DESTROYING || state == RDMA_CM_DEVICE_REMOVAL) { | |
5193 | spin_unlock_irqrestore(&id_priv->lock, flags); | |
5194 | mutex_unlock(&id_priv->handler_mutex); | |
5195 | cma_id_put(id_priv); | |
5196 | return; | |
5197 | } | |
5198 | id_priv->state = RDMA_CM_DEVICE_REMOVAL; | |
5199 | spin_unlock_irqrestore(&id_priv->lock, flags); | |
e51060f0 | 5200 | |
3647a28d JG |
5201 | if (cma_cm_event_handler(id_priv, &event)) { |
5202 | /* | |
5203 | * At this point the ULP promises it won't call | |
5204 | * rdma_destroy_id() concurrently | |
5205 | */ | |
5206 | cma_id_put(id_priv); | |
5207 | mutex_unlock(&id_priv->handler_mutex); | |
f6a9d47a JG |
5208 | trace_cm_id_destroy(id_priv); |
5209 | _destroy_id(id_priv, state); | |
3647a28d JG |
5210 | return; |
5211 | } | |
de910bd9 | 5212 | mutex_unlock(&id_priv->handler_mutex); |
3647a28d JG |
5213 | |
5214 | /* | |
5215 | * If this races with destroy then the thread that first assigns state | |
5216 | * to a destroying does the cancel. | |
5217 | */ | |
5218 | cma_cancel_operation(id_priv, state); | |
5219 | cma_id_put(id_priv); | |
e51060f0 SH |
5220 | } |
5221 | ||
5222 | static void cma_process_remove(struct cma_device *cma_dev) | |
5223 | { | |
e51060f0 SH |
5224 | mutex_lock(&lock); |
5225 | while (!list_empty(&cma_dev->id_list)) { | |
3647a28d | 5226 | struct rdma_id_private *id_priv = list_first_entry( |
99cfddb8 | 5227 | &cma_dev->id_list, struct rdma_id_private, device_item); |
e51060f0 | 5228 | |
99cfddb8 JG |
5229 | list_del_init(&id_priv->listen_item); |
5230 | list_del_init(&id_priv->device_item); | |
e368d23f | 5231 | cma_id_get(id_priv); |
e51060f0 SH |
5232 | mutex_unlock(&lock); |
5233 | ||
3647a28d | 5234 | cma_send_device_removal_put(id_priv); |
e51060f0 SH |
5235 | |
5236 | mutex_lock(&lock); | |
5237 | } | |
5238 | mutex_unlock(&lock); | |
5239 | ||
5ff8c8fa | 5240 | cma_dev_put(cma_dev); |
e51060f0 SH |
5241 | wait_for_completion(&cma_dev->comp); |
5242 | } | |
5243 | ||
4d51c3d9 PP |
5244 | static bool cma_supported(struct ib_device *device) |
5245 | { | |
5246 | u32 i; | |
5247 | ||
5248 | rdma_for_each_port(device, i) { | |
5249 | if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, i)) | |
5250 | return true; | |
5251 | } | |
5252 | return false; | |
5253 | } | |
5254 | ||
c80a0c52 LR |
5255 | static int cma_add_one(struct ib_device *device) |
5256 | { | |
dd37d2f5 | 5257 | struct rdma_id_private *to_destroy; |
c80a0c52 LR |
5258 | struct cma_device *cma_dev; |
5259 | struct rdma_id_private *id_priv; | |
c80a0c52 LR |
5260 | unsigned long supported_gids = 0; |
5261 | int ret; | |
1fb7f897 | 5262 | u32 i; |
c80a0c52 | 5263 | |
4d51c3d9 PP |
5264 | if (!cma_supported(device)) |
5265 | return -EOPNOTSUPP; | |
5266 | ||
c80a0c52 LR |
5267 | cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL); |
5268 | if (!cma_dev) | |
5269 | return -ENOMEM; | |
5270 | ||
5271 | cma_dev->device = device; | |
5272 | cma_dev->default_gid_type = kcalloc(device->phys_port_cnt, | |
5273 | sizeof(*cma_dev->default_gid_type), | |
5274 | GFP_KERNEL); | |
5275 | if (!cma_dev->default_gid_type) { | |
5276 | ret = -ENOMEM; | |
5277 | goto free_cma_dev; | |
5278 | } | |
5279 | ||
5280 | cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt, | |
5281 | sizeof(*cma_dev->default_roce_tos), | |
5282 | GFP_KERNEL); | |
5283 | if (!cma_dev->default_roce_tos) { | |
5284 | ret = -ENOMEM; | |
5285 | goto free_gid_type; | |
5286 | } | |
5287 | ||
5288 | rdma_for_each_port (device, i) { | |
5289 | supported_gids = roce_gid_type_mask_support(device, i); | |
5290 | WARN_ON(!supported_gids); | |
5291 | if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE)) | |
5292 | cma_dev->default_gid_type[i - rdma_start_port(device)] = | |
5293 | CMA_PREFERRED_ROCE_GID_TYPE; | |
5294 | else | |
5295 | cma_dev->default_gid_type[i - rdma_start_port(device)] = | |
5296 | find_first_bit(&supported_gids, BITS_PER_LONG); | |
5297 | cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0; | |
5298 | } | |
5299 | ||
5300 | init_completion(&cma_dev->comp); | |
5301 | refcount_set(&cma_dev->refcount, 1); | |
5302 | INIT_LIST_HEAD(&cma_dev->id_list); | |
5303 | ib_set_client_data(device, &cma_client, cma_dev); | |
5304 | ||
5305 | mutex_lock(&lock); | |
5306 | list_add_tail(&cma_dev->list, &dev_list); | |
99cfddb8 | 5307 | list_for_each_entry(id_priv, &listen_any_list, listen_any_item) { |
dd37d2f5 | 5308 | ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); |
c80a0c52 LR |
5309 | if (ret) |
5310 | goto free_listen; | |
5311 | } | |
5312 | mutex_unlock(&lock); | |
5313 | ||
5314 | trace_cm_add_one(device); | |
5315 | return 0; | |
5316 | ||
5317 | free_listen: | |
5318 | list_del(&cma_dev->list); | |
5319 | mutex_unlock(&lock); | |
5320 | ||
dd37d2f5 | 5321 | /* cma_process_remove() will delete to_destroy */ |
c80a0c52 LR |
5322 | cma_process_remove(cma_dev); |
5323 | kfree(cma_dev->default_roce_tos); | |
5324 | free_gid_type: | |
5325 | kfree(cma_dev->default_gid_type); | |
5326 | ||
5327 | free_cma_dev: | |
5328 | kfree(cma_dev); | |
5329 | return ret; | |
5330 | } | |
5331 | ||
7c1eb45a | 5332 | static void cma_remove_one(struct ib_device *device, void *client_data) |
e51060f0 | 5333 | { |
7c1eb45a | 5334 | struct cma_device *cma_dev = client_data; |
e51060f0 | 5335 | |
ed999f82 CL |
5336 | trace_cm_remove_one(device); |
5337 | ||
e51060f0 SH |
5338 | mutex_lock(&lock); |
5339 | list_del(&cma_dev->list); | |
5340 | mutex_unlock(&lock); | |
5341 | ||
5342 | cma_process_remove(cma_dev); | |
89052d78 | 5343 | kfree(cma_dev->default_roce_tos); |
045959db | 5344 | kfree(cma_dev->default_gid_type); |
e51060f0 SH |
5345 | kfree(cma_dev); |
5346 | } | |
5347 | ||
4be74b42 HE |
5348 | static int cma_init_net(struct net *net) |
5349 | { | |
5350 | struct cma_pernet *pernet = cma_pernet(net); | |
5351 | ||
63826753 MW |
5352 | xa_init(&pernet->tcp_ps); |
5353 | xa_init(&pernet->udp_ps); | |
5354 | xa_init(&pernet->ipoib_ps); | |
5355 | xa_init(&pernet->ib_ps); | |
4be74b42 HE |
5356 | |
5357 | return 0; | |
5358 | } | |
5359 | ||
5360 | static void cma_exit_net(struct net *net) | |
5361 | { | |
5362 | struct cma_pernet *pernet = cma_pernet(net); | |
5363 | ||
63826753 MW |
5364 | WARN_ON(!xa_empty(&pernet->tcp_ps)); |
5365 | WARN_ON(!xa_empty(&pernet->udp_ps)); | |
5366 | WARN_ON(!xa_empty(&pernet->ipoib_ps)); | |
5367 | WARN_ON(!xa_empty(&pernet->ib_ps)); | |
4be74b42 HE |
5368 | } |
5369 | ||
5370 | static struct pernet_operations cma_pernet_operations = { | |
5371 | .init = cma_init_net, | |
5372 | .exit = cma_exit_net, | |
5373 | .id = &cma_pernet_id, | |
5374 | .size = sizeof(struct cma_pernet), | |
5375 | }; | |
5376 | ||
716abb1f | 5377 | static int __init cma_init(void) |
e51060f0 | 5378 | { |
5d7220e8 | 5379 | int ret; |
227b60f5 | 5380 | |
32ac9e43 JG |
5381 | /* |
5382 | * There is a rare lock ordering dependency in cma_netdev_callback() | |
5383 | * that only happens when bonding is enabled. Teach lockdep that rtnl | |
5384 | * must never be nested under lock so it can find these without having | |
5385 | * to test with bonding. | |
5386 | */ | |
5387 | if (IS_ENABLED(CONFIG_LOCKDEP)) { | |
5388 | rtnl_lock(); | |
5389 | mutex_lock(&lock); | |
5390 | mutex_unlock(&lock); | |
5391 | rtnl_unlock(); | |
5392 | } | |
5393 | ||
dee9acbb | 5394 | cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM); |
e51060f0 SH |
5395 | if (!cma_wq) |
5396 | return -ENOMEM; | |
5397 | ||
4be74b42 HE |
5398 | ret = register_pernet_subsys(&cma_pernet_operations); |
5399 | if (ret) | |
5400 | goto err_wq; | |
5401 | ||
c1a0b23b | 5402 | ib_sa_register_client(&sa_client); |
dd5bdff8 | 5403 | register_netdevice_notifier(&cma_nb); |
925d046e | 5404 | register_netevent_notifier(&cma_netevent_cb); |
c1a0b23b | 5405 | |
e51060f0 SH |
5406 | ret = ib_register_client(&cma_client); |
5407 | if (ret) | |
5408 | goto err; | |
753f618a | 5409 | |
a7bfb93f | 5410 | ret = cma_configfs_init(); |
5411 | if (ret) | |
5412 | goto err_ib; | |
753f618a | 5413 | |
e51060f0 SH |
5414 | return 0; |
5415 | ||
a7bfb93f | 5416 | err_ib: |
5417 | ib_unregister_client(&cma_client); | |
e51060f0 | 5418 | err: |
925d046e | 5419 | unregister_netevent_notifier(&cma_netevent_cb); |
dd5bdff8 | 5420 | unregister_netdevice_notifier(&cma_nb); |
c1a0b23b | 5421 | ib_sa_unregister_client(&sa_client); |
44a7b675 | 5422 | unregister_pernet_subsys(&cma_pernet_operations); |
4be74b42 | 5423 | err_wq: |
e51060f0 SH |
5424 | destroy_workqueue(cma_wq); |
5425 | return ret; | |
5426 | } | |
5427 | ||
716abb1f | 5428 | static void __exit cma_cleanup(void) |
e51060f0 | 5429 | { |
045959db | 5430 | cma_configfs_exit(); |
e51060f0 | 5431 | ib_unregister_client(&cma_client); |
925d046e | 5432 | unregister_netevent_notifier(&cma_netevent_cb); |
dd5bdff8 | 5433 | unregister_netdevice_notifier(&cma_nb); |
c1a0b23b | 5434 | ib_sa_unregister_client(&sa_client); |
4be74b42 | 5435 | unregister_pernet_subsys(&cma_pernet_operations); |
e51060f0 | 5436 | destroy_workqueue(cma_wq); |
e51060f0 SH |
5437 | } |
5438 | ||
5439 | module_init(cma_init); | |
5440 | module_exit(cma_cleanup); |