Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. | |
3 | * Copyright (c) 2004 Infinicon Corporation. All rights reserved. | |
4 | * Copyright (c) 2004 Intel Corporation. All rights reserved. | |
5 | * Copyright (c) 2004 Topspin Corporation. All rights reserved. | |
6 | * Copyright (c) 2004 Voltaire Corporation. All rights reserved. | |
2a1d9b7f | 7 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. |
33b9b3ee | 8 | * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. |
1da177e4 LT |
9 | * |
10 | * This software is available to you under a choice of one of two | |
11 | * licenses. You may choose to be licensed under the terms of the GNU | |
12 | * General Public License (GPL) Version 2, available from the file | |
13 | * COPYING in the main directory of this source tree, or the | |
14 | * OpenIB.org BSD license below: | |
15 | * | |
16 | * Redistribution and use in source and binary forms, with or | |
17 | * without modification, are permitted provided that the following | |
18 | * conditions are met: | |
19 | * | |
20 | * - Redistributions of source code must retain the above | |
21 | * copyright notice, this list of conditions and the following | |
22 | * disclaimer. | |
23 | * | |
24 | * - Redistributions in binary form must reproduce the above | |
25 | * copyright notice, this list of conditions and the following | |
26 | * disclaimer in the documentation and/or other materials | |
27 | * provided with the distribution. | |
28 | * | |
29 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
30 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
31 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
32 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
33 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
34 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
35 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
36 | * SOFTWARE. | |
1da177e4 LT |
37 | */ |
38 | ||
39 | #include <linux/errno.h> | |
40 | #include <linux/err.h> | |
b108d976 | 41 | #include <linux/export.h> |
8c65b4a6 | 42 | #include <linux/string.h> |
0e0ec7e0 | 43 | #include <linux/slab.h> |
dbf727de MB |
44 | #include <linux/in.h> |
45 | #include <linux/in6.h> | |
46 | #include <net/addrconf.h> | |
d291f1a6 | 47 | #include <linux/security.h> |
1da177e4 | 48 | |
a4d61e84 RD |
49 | #include <rdma/ib_verbs.h> |
50 | #include <rdma/ib_cache.h> | |
dd5f03be | 51 | #include <rdma/ib_addr.h> |
a060b562 | 52 | #include <rdma/rw.h> |
1da177e4 | 53 | |
ed4c54e5 | 54 | #include "core_priv.h" |
1da177e4 | 55 | |
c0348eb0 PP |
56 | static int ib_resolve_eth_dmac(struct ib_device *device, |
57 | struct rdma_ah_attr *ah_attr); | |
58 | ||
2b1b5b60 SG |
59 | static const char * const ib_events[] = { |
60 | [IB_EVENT_CQ_ERR] = "CQ error", | |
61 | [IB_EVENT_QP_FATAL] = "QP fatal error", | |
62 | [IB_EVENT_QP_REQ_ERR] = "QP request error", | |
63 | [IB_EVENT_QP_ACCESS_ERR] = "QP access error", | |
64 | [IB_EVENT_COMM_EST] = "communication established", | |
65 | [IB_EVENT_SQ_DRAINED] = "send queue drained", | |
66 | [IB_EVENT_PATH_MIG] = "path migration successful", | |
67 | [IB_EVENT_PATH_MIG_ERR] = "path migration error", | |
68 | [IB_EVENT_DEVICE_FATAL] = "device fatal error", | |
69 | [IB_EVENT_PORT_ACTIVE] = "port active", | |
70 | [IB_EVENT_PORT_ERR] = "port error", | |
71 | [IB_EVENT_LID_CHANGE] = "LID change", | |
72 | [IB_EVENT_PKEY_CHANGE] = "P_key change", | |
73 | [IB_EVENT_SM_CHANGE] = "SM change", | |
74 | [IB_EVENT_SRQ_ERR] = "SRQ error", | |
75 | [IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached", | |
76 | [IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached", | |
77 | [IB_EVENT_CLIENT_REREGISTER] = "client reregister", | |
78 | [IB_EVENT_GID_CHANGE] = "GID changed", | |
79 | }; | |
80 | ||
db7489e0 | 81 | const char *__attribute_const__ ib_event_msg(enum ib_event_type event) |
2b1b5b60 SG |
82 | { |
83 | size_t index = event; | |
84 | ||
85 | return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ? | |
86 | ib_events[index] : "unrecognized event"; | |
87 | } | |
88 | EXPORT_SYMBOL(ib_event_msg); | |
89 | ||
90 | static const char * const wc_statuses[] = { | |
91 | [IB_WC_SUCCESS] = "success", | |
92 | [IB_WC_LOC_LEN_ERR] = "local length error", | |
93 | [IB_WC_LOC_QP_OP_ERR] = "local QP operation error", | |
94 | [IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error", | |
95 | [IB_WC_LOC_PROT_ERR] = "local protection error", | |
96 | [IB_WC_WR_FLUSH_ERR] = "WR flushed", | |
97 | [IB_WC_MW_BIND_ERR] = "memory management operation error", | |
98 | [IB_WC_BAD_RESP_ERR] = "bad response error", | |
99 | [IB_WC_LOC_ACCESS_ERR] = "local access error", | |
100 | [IB_WC_REM_INV_REQ_ERR] = "invalid request error", | |
101 | [IB_WC_REM_ACCESS_ERR] = "remote access error", | |
102 | [IB_WC_REM_OP_ERR] = "remote operation error", | |
103 | [IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded", | |
104 | [IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded", | |
105 | [IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error", | |
106 | [IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request", | |
107 | [IB_WC_REM_ABORT_ERR] = "operation aborted", | |
108 | [IB_WC_INV_EECN_ERR] = "invalid EE context number", | |
109 | [IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state", | |
110 | [IB_WC_FATAL_ERR] = "fatal error", | |
111 | [IB_WC_RESP_TIMEOUT_ERR] = "response timeout error", | |
112 | [IB_WC_GENERAL_ERR] = "general error", | |
113 | }; | |
114 | ||
db7489e0 | 115 | const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status) |
2b1b5b60 SG |
116 | { |
117 | size_t index = status; | |
118 | ||
119 | return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ? | |
120 | wc_statuses[index] : "unrecognized status"; | |
121 | } | |
122 | EXPORT_SYMBOL(ib_wc_status_msg); | |
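Usage sketch (not part of this file): how a consumer might report failed work completions with ib_wc_status_msg() while draining a CQ; the polling loop and names below are illustrative only.

static void example_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status != IB_WC_SUCCESS)
			pr_err("wr_id 0x%llx failed: %s\n",
			       (unsigned long long)wc.wr_id,
			       ib_wc_status_msg(wc.status));
	}
}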
123 | ||
8385fd84 | 124 | __attribute_const__ int ib_rate_to_mult(enum ib_rate rate) |
bf6a9e31 JM |
125 | { |
126 | switch (rate) { | |
e2dda368 HWR |
127 | case IB_RATE_2_5_GBPS: return 1; |
128 | case IB_RATE_5_GBPS: return 2; | |
129 | case IB_RATE_10_GBPS: return 4; | |
130 | case IB_RATE_20_GBPS: return 8; | |
131 | case IB_RATE_30_GBPS: return 12; | |
132 | case IB_RATE_40_GBPS: return 16; | |
133 | case IB_RATE_60_GBPS: return 24; | |
134 | case IB_RATE_80_GBPS: return 32; | |
135 | case IB_RATE_120_GBPS: return 48; | |
136 | case IB_RATE_14_GBPS: return 6; | |
137 | case IB_RATE_56_GBPS: return 22; | |
138 | case IB_RATE_112_GBPS: return 45; | |
139 | case IB_RATE_168_GBPS: return 67; | |
140 | case IB_RATE_25_GBPS: return 10; | |
141 | case IB_RATE_100_GBPS: return 40; | |
142 | case IB_RATE_200_GBPS: return 80; | |
143 | case IB_RATE_300_GBPS: return 120; | |
144 | default: return -1; | |
bf6a9e31 JM |
145 | } |
146 | } | |
147 | EXPORT_SYMBOL(ib_rate_to_mult); | |
148 | ||
8385fd84 | 149 | __attribute_const__ enum ib_rate mult_to_ib_rate(int mult) |
bf6a9e31 JM |
150 | { |
151 | switch (mult) { | |
e2dda368 HWR |
152 | case 1: return IB_RATE_2_5_GBPS; |
153 | case 2: return IB_RATE_5_GBPS; | |
154 | case 4: return IB_RATE_10_GBPS; | |
155 | case 8: return IB_RATE_20_GBPS; | |
156 | case 12: return IB_RATE_30_GBPS; | |
157 | case 16: return IB_RATE_40_GBPS; | |
158 | case 24: return IB_RATE_60_GBPS; | |
159 | case 32: return IB_RATE_80_GBPS; | |
160 | case 48: return IB_RATE_120_GBPS; | |
161 | case 6: return IB_RATE_14_GBPS; | |
162 | case 22: return IB_RATE_56_GBPS; | |
163 | case 45: return IB_RATE_112_GBPS; | |
164 | case 67: return IB_RATE_168_GBPS; | |
165 | case 10: return IB_RATE_25_GBPS; | |
166 | case 40: return IB_RATE_100_GBPS; | |
167 | case 80: return IB_RATE_200_GBPS; | |
168 | case 120: return IB_RATE_300_GBPS; | |
169 | default: return IB_RATE_PORT_CURRENT; | |
bf6a9e31 JM |
170 | } |
171 | } | |
172 | EXPORT_SYMBOL(mult_to_ib_rate); | |
173 | ||
8385fd84 | 174 | __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate) |
71eeba16 MA |
175 | { |
176 | switch (rate) { | |
177 | case IB_RATE_2_5_GBPS: return 2500; | |
178 | case IB_RATE_5_GBPS: return 5000; | |
179 | case IB_RATE_10_GBPS: return 10000; | |
180 | case IB_RATE_20_GBPS: return 20000; | |
181 | case IB_RATE_30_GBPS: return 30000; | |
182 | case IB_RATE_40_GBPS: return 40000; | |
183 | case IB_RATE_60_GBPS: return 60000; | |
184 | case IB_RATE_80_GBPS: return 80000; | |
185 | case IB_RATE_120_GBPS: return 120000; | |
186 | case IB_RATE_14_GBPS: return 14062; | |
187 | case IB_RATE_56_GBPS: return 56250; | |
188 | case IB_RATE_112_GBPS: return 112500; | |
189 | case IB_RATE_168_GBPS: return 168750; | |
190 | case IB_RATE_25_GBPS: return 25781; | |
191 | case IB_RATE_100_GBPS: return 103125; | |
192 | case IB_RATE_200_GBPS: return 206250; | |
193 | case IB_RATE_300_GBPS: return 309375; | |
194 | default: return -1; | |
195 | } | |
196 | } | |
197 | EXPORT_SYMBOL(ib_rate_to_mbps); | |
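Illustrative sketch (not from this file) of how the three rate helpers relate: ib_rate_to_mult() expresses a rate as a multiple of the 2.5 Gb/s base rate, mult_to_ib_rate() is its inverse, and ib_rate_to_mbps() gives the rate in Mbit/s; IB_RATE_100_GBPS is used purely as an example.

static void example_rate_conversions(void)
{
	enum ib_rate rate = IB_RATE_100_GBPS;

	pr_info("mult=%d (x 2.5 Gb/s), mbps=%d\n",
		ib_rate_to_mult(rate), ib_rate_to_mbps(rate));

	/* Round trip: 40 * 2.5 Gb/s maps back to IB_RATE_100_GBPS. */
	WARN_ON(mult_to_ib_rate(ib_rate_to_mult(rate)) != rate);
}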
198 | ||
8385fd84 | 199 | __attribute_const__ enum rdma_transport_type |
07ebafba TT |
200 | rdma_node_get_transport(enum rdma_node_type node_type) |
201 | { | |
cdc596d8 LR |
202 | |
203 | if (node_type == RDMA_NODE_USNIC) | |
5db5765e | 204 | return RDMA_TRANSPORT_USNIC; |
cdc596d8 | 205 | if (node_type == RDMA_NODE_USNIC_UDP) |
248567f7 | 206 | return RDMA_TRANSPORT_USNIC_UDP; |
cdc596d8 LR |
207 | if (node_type == RDMA_NODE_RNIC) |
208 | return RDMA_TRANSPORT_IWARP; | |
209 | ||
210 | return RDMA_TRANSPORT_IB; | |
07ebafba TT |
211 | } |
212 | EXPORT_SYMBOL(rdma_node_get_transport); | |
213 | ||
a3f5adaf EC |
214 | enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num) |
215 | { | |
82901e3e | 216 | enum rdma_transport_type lt; |
a3f5adaf EC |
217 | if (device->get_link_layer) |
218 | return device->get_link_layer(device, port_num); | |
219 | ||
82901e3e LR |
220 | lt = rdma_node_get_transport(device->node_type); |
221 | if (lt == RDMA_TRANSPORT_IB) | |
a3f5adaf | 222 | return IB_LINK_LAYER_INFINIBAND; |
82901e3e LR |
223 | |
224 | return IB_LINK_LAYER_ETHERNET; | |
a3f5adaf EC |
225 | } |
226 | EXPORT_SYMBOL(rdma_port_get_link_layer); | |
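Minimal sketch (not from this file): consumers typically branch on the port's link layer, for example to decide whether RoCE-style address resolution is required; the helper name is hypothetical.

static bool example_port_is_roce(struct ib_device *device, u8 port_num)
{
	return rdma_port_get_link_layer(device, port_num) ==
	       IB_LINK_LAYER_ETHERNET;
}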
227 | ||
1da177e4 LT |
228 | /* Protection domains */ |
229 | ||
96249d70 JG |
230 | /** |
231 | * ib_alloc_pd - Allocates an unused protection domain. | |
232 | * @device: The device on which to allocate the protection domain. | |
233 | * | |
234 | * A protection domain object provides an association between QPs, shared | |
235 | * receive queues, address handles, memory regions, and memory windows. | |
236 | * | |
237 | * Every PD has a local_dma_lkey which can be used as the lkey value for local | |
238 | * memory operations. | |
239 | */ | |
ed082d36 CH |
240 | struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, |
241 | const char *caller) | |
1da177e4 LT |
242 | { |
243 | struct ib_pd *pd; | |
ed082d36 | 244 | int mr_access_flags = 0; |
1da177e4 | 245 | |
b5e81bf5 | 246 | pd = device->alloc_pd(device, NULL, NULL); |
96249d70 JG |
247 | if (IS_ERR(pd)) |
248 | return pd; | |
1da177e4 | 249 | |
96249d70 JG |
250 | pd->device = device; |
251 | pd->uobject = NULL; | |
50d46335 | 252 | pd->__internal_mr = NULL; |
96249d70 | 253 | atomic_set(&pd->usecnt, 0); |
ed082d36 | 254 | pd->flags = flags; |
1da177e4 | 255 | |
86bee4c9 | 256 | if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) |
96249d70 | 257 | pd->local_dma_lkey = device->local_dma_lkey; |
ed082d36 CH |
258 | else |
259 | mr_access_flags |= IB_ACCESS_LOCAL_WRITE; | |
260 | ||
261 | if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) { | |
262 | pr_warn("%s: enabling unsafe global rkey\n", caller); | |
263 | mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE; | |
264 | } | |
265 | ||
9d5f8c20 LR |
266 | pd->res.type = RDMA_RESTRACK_PD; |
267 | pd->res.kern_name = caller; | |
268 | rdma_restrack_add(&pd->res); | |
269 | ||
ed082d36 | 270 | if (mr_access_flags) { |
96249d70 JG |
271 | struct ib_mr *mr; |
272 | ||
5ef990f0 | 273 | mr = pd->device->get_dma_mr(pd, mr_access_flags); |
96249d70 JG |
274 | if (IS_ERR(mr)) { |
275 | ib_dealloc_pd(pd); | |
5ef990f0 | 276 | return ERR_CAST(mr); |
96249d70 | 277 | } |
1da177e4 | 278 | |
5ef990f0 CH |
279 | mr->device = pd->device; |
280 | mr->pd = pd; | |
281 | mr->uobject = NULL; | |
282 | mr->need_inval = false; | |
283 | ||
50d46335 | 284 | pd->__internal_mr = mr; |
ed082d36 CH |
285 | |
286 | if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) | |
287 | pd->local_dma_lkey = pd->__internal_mr->lkey; | |
288 | ||
289 | if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) | |
290 | pd->unsafe_global_rkey = pd->__internal_mr->rkey; | |
1da177e4 | 291 | } |
ed082d36 | 292 | |
1da177e4 LT |
293 | return pd; |
294 | } | |
ed082d36 | 295 | EXPORT_SYMBOL(__ib_alloc_pd); |
1da177e4 | 296 | |
7dd78647 JG |
297 | /** |
298 | * ib_dealloc_pd - Deallocates a protection domain. | |
299 | * @pd: The protection domain to deallocate. | |
300 | * | |
301 | * It is an error to call this function while any resources in the pd still | |
302 | * exist. The caller is responsible to synchronously destroy them and | |
303 | * guarantee no new allocations will happen. | |
304 | */ | |
305 | void ib_dealloc_pd(struct ib_pd *pd) | |
1da177e4 | 306 | { |
7dd78647 JG |
307 | int ret; |
308 | ||
50d46335 | 309 | if (pd->__internal_mr) { |
5ef990f0 | 310 | ret = pd->device->dereg_mr(pd->__internal_mr); |
7dd78647 | 311 | WARN_ON(ret); |
50d46335 | 312 | pd->__internal_mr = NULL; |
96249d70 | 313 | } |
1da177e4 | 314 | |
7dd78647 JG |
315 | /* uverbs manipulates usecnt with proper locking, while the kabi |
316 | requires the caller to guarantee we can't race here. */ | |
317 | WARN_ON(atomic_read(&pd->usecnt)); | |
1da177e4 | 318 | |
9d5f8c20 | 319 | rdma_restrack_del(&pd->res); |
7dd78647 JG |
320 | /* Making dealloc_pd a void return is a WIP, no driver should return
321 | an error here. */ | |
322 | ret = pd->device->dealloc_pd(pd); | |
323 | WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd"); | |
1da177e4 LT |
324 | } |
325 | EXPORT_SYMBOL(ib_dealloc_pd); | |
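Usage sketch for a kernel ULP (assumptions: the device pointer comes from an ib_client ->add() callback, and ib_alloc_pd() is the wrapper macro that passes KBUILD_MODNAME as @caller to __ib_alloc_pd()):

static int example_pd_usage(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = ib_alloc_pd(device, 0);	/* no IB_PD_UNSAFE_GLOBAL_RKEY */
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	/* ... create QPs/MRs against pd, using pd->local_dma_lkey ... */

	ib_dealloc_pd(pd);	/* all PD resources must be destroyed first */
	return 0;
}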
326 | ||
327 | /* Address handles */ | |
328 | ||
5cda6587 PP |
329 | static struct ib_ah *_rdma_create_ah(struct ib_pd *pd, |
330 | struct rdma_ah_attr *ah_attr, | |
331 | struct ib_udata *udata) | |
1da177e4 LT |
332 | { |
333 | struct ib_ah *ah; | |
334 | ||
5cda6587 | 335 | ah = pd->device->create_ah(pd, ah_attr, udata); |
1da177e4 LT |
336 | |
337 | if (!IS_ERR(ah)) { | |
b5e81bf5 RD |
338 | ah->device = pd->device; |
339 | ah->pd = pd; | |
340 | ah->uobject = NULL; | |
44c58487 | 341 | ah->type = ah_attr->type; |
1da177e4 LT |
342 | atomic_inc(&pd->usecnt); |
343 | } | |
344 | ||
345 | return ah; | |
346 | } | |
5cda6587 PP |
347 | |
348 | struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr) | |
349 | { | |
350 | return _rdma_create_ah(pd, ah_attr, NULL); | |
351 | } | |
0a18cfe4 | 352 | EXPORT_SYMBOL(rdma_create_ah); |
1da177e4 | 353 | |
5cda6587 PP |
354 | /** |
355 | * rdma_create_user_ah - Creates an address handle for the | |
356 | * given address vector. | |
357 | * It resolves the destination MAC address for an address handle attribute of RoCE type.
358 | * @pd: The protection domain associated with the address handle. | |
359 | * @ah_attr: The attributes of the address vector. | |
360 | * @udata: pointer to user's input output buffer information needed by
361 | * provider driver. | |
362 | * | |
363 | * It returns a valid address handle on success and an ERR_PTR encoded error on failure.
364 | * The address handle is used to reference a local or global destination | |
365 | * in all UD QP post sends. | |
366 | */ | |
367 | struct ib_ah *rdma_create_user_ah(struct ib_pd *pd, | |
368 | struct rdma_ah_attr *ah_attr, | |
369 | struct ib_udata *udata) | |
370 | { | |
371 | int err; | |
372 | ||
373 | if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) { | |
374 | err = ib_resolve_eth_dmac(pd->device, ah_attr); | |
375 | if (err) | |
376 | return ERR_PTR(err); | |
377 | } | |
378 | ||
379 | return _rdma_create_ah(pd, ah_attr, udata); | |
380 | } | |
381 | EXPORT_SYMBOL(rdma_create_user_ah); | |
382 | ||
850d8fd7 | 383 | int ib_get_rdma_header_version(const union rdma_network_hdr *hdr) |
c865f246 SK |
384 | { |
385 | const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh; | |
386 | struct iphdr ip4h_checked; | |
387 | const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh; | |
388 | ||
389 | /* If it's IPv6, the version must be 6, otherwise, the first | |
390 | * 20 bytes (before the IPv4 header) are garbled. | |
391 | */ | |
392 | if (ip6h->version != 6) | |
393 | return (ip4h->version == 4) ? 4 : 0; | |
394 | /* version may be 6 or 4 because the first 20 bytes could be garbled */ | |
395 | ||
396 | /* RoCE v2 requires no options, thus header length | |
397 | * must be 5 words | |
398 | */ | |
399 | if (ip4h->ihl != 5) | |
400 | return 6; | |
401 | ||
402 | /* Verify checksum. | |
403 | * We can't write on scattered buffers so we need to copy to | |
404 | * temp buffer. | |
405 | */ | |
406 | memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked)); | |
407 | ip4h_checked.check = 0; | |
408 | ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5); | |
409 | /* if IPv4 header checksum is OK, believe it */ | |
410 | if (ip4h->check == ip4h_checked.check) | |
411 | return 4; | |
412 | return 6; | |
413 | } | |
850d8fd7 | 414 | EXPORT_SYMBOL(ib_get_rdma_header_version); |
c865f246 SK |
415 | |
416 | static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device, | |
417 | u8 port_num, | |
418 | const struct ib_grh *grh) | |
419 | { | |
420 | int grh_version; | |
421 | ||
422 | if (rdma_protocol_ib(device, port_num)) | |
423 | return RDMA_NETWORK_IB; | |
424 | ||
850d8fd7 | 425 | grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh); |
c865f246 SK |
426 | |
427 | if (grh_version == 4) | |
428 | return RDMA_NETWORK_IPV4; | |
429 | ||
430 | if (grh->next_hdr == IPPROTO_UDP) | |
431 | return RDMA_NETWORK_IPV6; | |
432 | ||
433 | return RDMA_NETWORK_ROCE_V1; | |
434 | } | |
435 | ||
dbf727de MB |
436 | struct find_gid_index_context { |
437 | u16 vlan_id; | |
c865f246 | 438 | enum ib_gid_type gid_type; |
dbf727de MB |
439 | }; |
440 | ||
441 | static bool find_gid_index(const union ib_gid *gid, | |
442 | const struct ib_gid_attr *gid_attr, | |
443 | void *context) | |
444 | { | |
b0dd0d33 | 445 | struct find_gid_index_context *ctx = context; |
dbf727de | 446 | |
c865f246 SK |
447 | if (ctx->gid_type != gid_attr->gid_type) |
448 | return false; | |
449 | ||
dbf727de MB |
450 | if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) || |
451 | (is_vlan_dev(gid_attr->ndev) && | |
452 | vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id)) | |
453 | return false; | |
454 | ||
455 | return true; | |
456 | } | |
457 | ||
458 | static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num, | |
459 | u16 vlan_id, const union ib_gid *sgid, | |
c865f246 | 460 | enum ib_gid_type gid_type, |
dbf727de MB |
461 | u16 *gid_index) |
462 | { | |
c865f246 SK |
463 | struct find_gid_index_context context = {.vlan_id = vlan_id, |
464 | .gid_type = gid_type}; | |
dbf727de MB |
465 | |
466 | return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index, | |
467 | &context, gid_index); | |
468 | } | |
469 | ||
850d8fd7 MS |
470 | int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, |
471 | enum rdma_network_type net_type, | |
472 | union ib_gid *sgid, union ib_gid *dgid) | |
c865f246 SK |
473 | { |
474 | struct sockaddr_in src_in; | |
475 | struct sockaddr_in dst_in; | |
476 | __be32 src_saddr, dst_saddr; | |
477 | ||
478 | if (!sgid || !dgid) | |
479 | return -EINVAL; | |
480 | ||
481 | if (net_type == RDMA_NETWORK_IPV4) { | |
482 | memcpy(&src_in.sin_addr.s_addr, | |
483 | &hdr->roce4grh.saddr, 4); | |
484 | memcpy(&dst_in.sin_addr.s_addr, | |
485 | &hdr->roce4grh.daddr, 4); | |
486 | src_saddr = src_in.sin_addr.s_addr; | |
487 | dst_saddr = dst_in.sin_addr.s_addr; | |
488 | ipv6_addr_set_v4mapped(src_saddr, | |
489 | (struct in6_addr *)sgid); | |
490 | ipv6_addr_set_v4mapped(dst_saddr, | |
491 | (struct in6_addr *)dgid); | |
492 | return 0; | |
493 | } else if (net_type == RDMA_NETWORK_IPV6 || | |
494 | net_type == RDMA_NETWORK_IB) { | |
495 | *dgid = hdr->ibgrh.dgid; | |
496 | *sgid = hdr->ibgrh.sgid; | |
497 | return 0; | |
498 | } else { | |
499 | return -EINVAL; | |
500 | } | |
501 | } | |
850d8fd7 | 502 | EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr); |
c865f246 | 503 | |
1060f865 PP |
504 | /* Resolve the destination MAC address and hop limit for a unicast destination
505 | * GID entry, considering the source GID entry as well.
506 | * ah_attr must have a valid port_num and sgid_index.
507 | */ | |
508 | static int ib_resolve_unicast_gid_dmac(struct ib_device *device, | |
509 | struct rdma_ah_attr *ah_attr) | |
510 | { | |
511 | struct ib_gid_attr sgid_attr; | |
512 | struct ib_global_route *grh; | |
513 | int hop_limit = 0xff; | |
514 | union ib_gid sgid; | |
515 | int ret; | |
516 | ||
517 | grh = rdma_ah_retrieve_grh(ah_attr); | |
518 | ||
519 | ret = ib_query_gid(device, | |
520 | rdma_ah_get_port_num(ah_attr), | |
521 | grh->sgid_index, | |
522 | &sgid, &sgid_attr); | |
523 | if (ret || !sgid_attr.ndev) { | |
524 | if (!ret) | |
525 | ret = -ENXIO; | |
526 | return ret; | |
527 | } | |
528 | ||
56d0a7d9 PP |
529 | /* If destination is link local and source GID is RoCEv1, |
530 | * IP stack is not used. | |
531 | */ | |
532 | if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) && | |
533 | sgid_attr.gid_type == IB_GID_TYPE_ROCE) { | |
534 | rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw, | |
535 | ah_attr->roce.dmac); | |
536 | goto done; | |
537 | } | |
538 | ||
1060f865 PP |
539 | ret = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid, |
540 | ah_attr->roce.dmac, | |
541 | sgid_attr.ndev, &hop_limit); | |
56d0a7d9 | 542 | done: |
1060f865 PP |
543 | dev_put(sgid_attr.ndev); |
544 | ||
545 | grh->hop_limit = hop_limit; | |
546 | return ret; | |
547 | } | |
548 | ||
28b5b3a2 | 549 | /* |
f6bdb142 | 550 | * This function initializes address handle attributes from the incoming packet. |
28b5b3a2 GS |
551 | * The incoming packet's dgid holds the GID of the receiver node on which this
552 | * code is executing, and its sgid holds the GID of the sender.
553 | *
554 | * When resolving the destination MAC address, the received dgid is therefore
555 | * used as the sgid and the received sgid is used as the dgid, since the
556 | * received sgid is the destination GID to respond to.
557 | * | |
28b5b3a2 | 558 | */ |
f6bdb142 PP |
559 | int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num, |
560 | const struct ib_wc *wc, const struct ib_grh *grh, | |
561 | struct rdma_ah_attr *ah_attr) | |
513789ed | 562 | { |
513789ed HR |
563 | u32 flow_class; |
564 | u16 gid_index; | |
565 | int ret; | |
c865f246 SK |
566 | enum rdma_network_type net_type = RDMA_NETWORK_IB; |
567 | enum ib_gid_type gid_type = IB_GID_TYPE_IB; | |
c3efe750 | 568 | int hoplimit = 0xff; |
c865f246 SK |
569 | union ib_gid dgid; |
570 | union ib_gid sgid; | |
513789ed | 571 | |
79364227 RD |
572 | might_sleep(); |
573 | ||
4e00d694 | 574 | memset(ah_attr, 0, sizeof *ah_attr); |
44c58487 | 575 | ah_attr->type = rdma_ah_find_type(device, port_num); |
227128fc | 576 | if (rdma_cap_eth_ah(device, port_num)) { |
c865f246 SK |
577 | if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE) |
578 | net_type = wc->network_hdr_type; | |
579 | else | |
580 | net_type = ib_get_net_type_by_grh(device, port_num, grh); | |
581 | gid_type = ib_network_to_gid_type(net_type); | |
582 | } | |
850d8fd7 MS |
583 | ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type, |
584 | &sgid, &dgid); | |
c865f246 SK |
585 | if (ret) |
586 | return ret; | |
587 | ||
1060f865 PP |
588 | rdma_ah_set_sl(ah_attr, wc->sl); |
589 | rdma_ah_set_port_num(ah_attr, port_num); | |
590 | ||
c865f246 | 591 | if (rdma_protocol_roce(device, port_num)) { |
dbf727de MB |
592 | u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ? |
593 | wc->vlan_id : 0xffff; | |
594 | ||
dd5f03be MB |
595 | if (!(wc->wc_flags & IB_WC_GRH)) |
596 | return -EPROTOTYPE; | |
597 | ||
1060f865 PP |
598 | ret = get_sgid_index_from_eth(device, port_num, |
599 | vlan_id, &dgid, | |
600 | gid_type, &gid_index); | |
dbf727de MB |
601 | if (ret) |
602 | return ret; | |
dd5f03be | 603 | |
1060f865 PP |
604 | flow_class = be32_to_cpu(grh->version_tclass_flow); |
605 | rdma_ah_set_grh(ah_attr, &sgid, | |
606 | flow_class & 0xFFFFF, | |
607 | (u8)gid_index, hoplimit, | |
608 | (flow_class >> 20) & 0xFF); | |
609 | return ib_resolve_unicast_gid_dmac(device, ah_attr); | |
610 | } else { | |
611 | rdma_ah_set_dlid(ah_attr, wc->slid); | |
612 | rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits); | |
513789ed | 613 | |
1060f865 | 614 | if (wc->wc_flags & IB_WC_GRH) { |
b3556005 EC |
615 | if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) { |
616 | ret = ib_find_cached_gid_by_port(device, &dgid, | |
617 | IB_GID_TYPE_IB, | |
618 | port_num, NULL, | |
619 | &gid_index); | |
620 | if (ret) | |
621 | return ret; | |
622 | } else { | |
623 | gid_index = 0; | |
624 | } | |
d8966fcd | 625 | |
1060f865 PP |
626 | flow_class = be32_to_cpu(grh->version_tclass_flow); |
627 | rdma_ah_set_grh(ah_attr, &sgid, | |
628 | flow_class & 0xFFFFF, | |
629 | (u8)gid_index, hoplimit, | |
630 | (flow_class >> 20) & 0xFF); | |
631 | } | |
632 | return 0; | |
513789ed | 633 | } |
4e00d694 | 634 | } |
f6bdb142 | 635 | EXPORT_SYMBOL(ib_init_ah_attr_from_wc); |
4e00d694 | 636 | |
73cdaaee IW |
637 | struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, |
638 | const struct ib_grh *grh, u8 port_num) | |
4e00d694 | 639 | { |
90898850 | 640 | struct rdma_ah_attr ah_attr; |
4e00d694 SH |
641 | int ret; |
642 | ||
f6bdb142 | 643 | ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr); |
4e00d694 SH |
644 | if (ret) |
645 | return ERR_PTR(ret); | |
513789ed | 646 | |
0a18cfe4 | 647 | return rdma_create_ah(pd, &ah_attr); |
513789ed HR |
648 | } |
649 | EXPORT_SYMBOL(ib_create_ah_from_wc); | |
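Hedged sketch of the classic reply pattern built on ib_create_ah_from_wc(): a UD service constructs an address handle for the sender directly from the work completion and the received GRH; recv_buf and port are hypothetical names.

static struct ib_ah *example_reply_ah(struct ib_pd *pd,
				      const struct ib_wc *wc,
				      void *recv_buf, u8 port)
{
	/*
	 * For UD QPs the first 40 bytes of every receive buffer are
	 * reserved for the GRH; its contents are only meaningful when
	 * IB_WC_GRH is set in wc->wc_flags, which the core checks.
	 */
	const struct ib_grh *grh = recv_buf;

	return ib_create_ah_from_wc(pd, wc, grh, port);
}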
650 | ||
67b985b6 | 651 | int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr) |
1da177e4 | 652 | { |
44c58487 DC |
653 | if (ah->type != ah_attr->type) |
654 | return -EINVAL; | |
655 | ||
1da177e4 LT |
656 | return ah->device->modify_ah ? |
657 | ah->device->modify_ah(ah, ah_attr) : | |
87915bf8 | 658 | -EOPNOTSUPP; |
1da177e4 | 659 | } |
67b985b6 | 660 | EXPORT_SYMBOL(rdma_modify_ah); |
1da177e4 | 661 | |
bfbfd661 | 662 | int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr) |
1da177e4 LT |
663 | { |
664 | return ah->device->query_ah ? | |
665 | ah->device->query_ah(ah, ah_attr) : | |
87915bf8 | 666 | -EOPNOTSUPP; |
1da177e4 | 667 | } |
bfbfd661 | 668 | EXPORT_SYMBOL(rdma_query_ah); |
1da177e4 | 669 | |
36523159 | 670 | int rdma_destroy_ah(struct ib_ah *ah) |
1da177e4 LT |
671 | { |
672 | struct ib_pd *pd; | |
673 | int ret; | |
674 | ||
675 | pd = ah->pd; | |
676 | ret = ah->device->destroy_ah(ah); | |
677 | if (!ret) | |
678 | atomic_dec(&pd->usecnt); | |
679 | ||
680 | return ret; | |
681 | } | |
36523159 | 682 | EXPORT_SYMBOL(rdma_destroy_ah); |
1da177e4 | 683 | |
d41fcc67 RD |
684 | /* Shared receive queues */ |
685 | ||
686 | struct ib_srq *ib_create_srq(struct ib_pd *pd, | |
687 | struct ib_srq_init_attr *srq_init_attr) | |
688 | { | |
689 | struct ib_srq *srq; | |
690 | ||
691 | if (!pd->device->create_srq) | |
87915bf8 | 692 | return ERR_PTR(-EOPNOTSUPP); |
d41fcc67 RD |
693 | |
694 | srq = pd->device->create_srq(pd, srq_init_attr, NULL); | |
695 | ||
696 | if (!IS_ERR(srq)) { | |
697 | srq->device = pd->device; | |
698 | srq->pd = pd; | |
699 | srq->uobject = NULL; | |
700 | srq->event_handler = srq_init_attr->event_handler; | |
701 | srq->srq_context = srq_init_attr->srq_context; | |
96104eda | 702 | srq->srq_type = srq_init_attr->srq_type; |
1a56ff6d AK |
703 | if (ib_srq_has_cq(srq->srq_type)) { |
704 | srq->ext.cq = srq_init_attr->ext.cq; | |
705 | atomic_inc(&srq->ext.cq->usecnt); | |
706 | } | |
418d5130 SH |
707 | if (srq->srq_type == IB_SRQT_XRC) { |
708 | srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd; | |
418d5130 | 709 | atomic_inc(&srq->ext.xrc.xrcd->usecnt); |
418d5130 | 710 | } |
d41fcc67 RD |
711 | atomic_inc(&pd->usecnt); |
712 | atomic_set(&srq->usecnt, 0); | |
713 | } | |
714 | ||
715 | return srq; | |
716 | } | |
717 | EXPORT_SYMBOL(ib_create_srq); | |
718 | ||
719 | int ib_modify_srq(struct ib_srq *srq, | |
720 | struct ib_srq_attr *srq_attr, | |
721 | enum ib_srq_attr_mask srq_attr_mask) | |
722 | { | |
7ce5eacb DB |
723 | return srq->device->modify_srq ? |
724 | srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) : | |
87915bf8 | 725 | -EOPNOTSUPP; |
d41fcc67 RD |
726 | } |
727 | EXPORT_SYMBOL(ib_modify_srq); | |
728 | ||
729 | int ib_query_srq(struct ib_srq *srq, | |
730 | struct ib_srq_attr *srq_attr) | |
731 | { | |
732 | return srq->device->query_srq ? | |
87915bf8 | 733 | srq->device->query_srq(srq, srq_attr) : -EOPNOTSUPP; |
d41fcc67 RD |
734 | } |
735 | EXPORT_SYMBOL(ib_query_srq); | |
736 | ||
737 | int ib_destroy_srq(struct ib_srq *srq) | |
738 | { | |
739 | struct ib_pd *pd; | |
418d5130 SH |
740 | enum ib_srq_type srq_type; |
741 | struct ib_xrcd *uninitialized_var(xrcd); | |
742 | struct ib_cq *uninitialized_var(cq); | |
d41fcc67 RD |
743 | int ret; |
744 | ||
745 | if (atomic_read(&srq->usecnt)) | |
746 | return -EBUSY; | |
747 | ||
748 | pd = srq->pd; | |
418d5130 | 749 | srq_type = srq->srq_type; |
1a56ff6d AK |
750 | if (ib_srq_has_cq(srq_type)) |
751 | cq = srq->ext.cq; | |
752 | if (srq_type == IB_SRQT_XRC) | |
418d5130 | 753 | xrcd = srq->ext.xrc.xrcd; |
d41fcc67 RD |
754 | |
755 | ret = srq->device->destroy_srq(srq); | |
418d5130 | 756 | if (!ret) { |
d41fcc67 | 757 | atomic_dec(&pd->usecnt); |
1a56ff6d | 758 | if (srq_type == IB_SRQT_XRC) |
418d5130 | 759 | atomic_dec(&xrcd->usecnt); |
1a56ff6d | 760 | if (ib_srq_has_cq(srq_type)) |
418d5130 | 761 | atomic_dec(&cq->usecnt); |
418d5130 | 762 | } |
d41fcc67 RD |
763 | |
764 | return ret; | |
765 | } | |
766 | EXPORT_SYMBOL(ib_destroy_srq); | |
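Minimal sketch (assumptions: a valid PD, illustrative queue sizes) of creating a plain IB_SRQT_BASIC shared receive queue; limit/event handling is omitted.

static struct ib_srq *example_create_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr attr = {
		.attr = {
			.max_wr	 = 256,	/* outstanding receive WRs */
			.max_sge = 1,
		},
		.srq_type = IB_SRQT_BASIC,
	};

	return ib_create_srq(pd, &attr);
}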
767 | ||
1da177e4 LT |
768 | /* Queue pairs */ |
769 | ||
0e0ec7e0 SH |
770 | static void __ib_shared_qp_event_handler(struct ib_event *event, void *context) |
771 | { | |
772 | struct ib_qp *qp = context; | |
73c40c61 | 773 | unsigned long flags; |
0e0ec7e0 | 774 | |
73c40c61 | 775 | spin_lock_irqsave(&qp->device->event_handler_lock, flags); |
0e0ec7e0 | 776 | list_for_each_entry(event->element.qp, &qp->open_list, open_list) |
eec9e29f SP |
777 | if (event->element.qp->event_handler) |
778 | event->element.qp->event_handler(event, event->element.qp->qp_context); | |
73c40c61 | 779 | spin_unlock_irqrestore(&qp->device->event_handler_lock, flags); |
0e0ec7e0 SH |
780 | } |
781 | ||
d3d72d90 SH |
782 | static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp) |
783 | { | |
784 | mutex_lock(&xrcd->tgt_qp_mutex); | |
785 | list_add(&qp->xrcd_list, &xrcd->tgt_qp_list); | |
786 | mutex_unlock(&xrcd->tgt_qp_mutex); | |
787 | } | |
788 | ||
0e0ec7e0 SH |
789 | static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp, |
790 | void (*event_handler)(struct ib_event *, void *), | |
791 | void *qp_context) | |
d3d72d90 | 792 | { |
0e0ec7e0 SH |
793 | struct ib_qp *qp; |
794 | unsigned long flags; | |
d291f1a6 | 795 | int err; |
0e0ec7e0 SH |
796 | |
797 | qp = kzalloc(sizeof *qp, GFP_KERNEL); | |
798 | if (!qp) | |
799 | return ERR_PTR(-ENOMEM); | |
800 | ||
d291f1a6 DJ |
801 | qp->real_qp = real_qp; |
802 | err = ib_open_shared_qp_security(qp, real_qp->device); | |
803 | if (err) { | |
804 | kfree(qp); | |
805 | return ERR_PTR(err); | |
806 | } | |
807 | ||
0e0ec7e0 SH |
808 | qp->real_qp = real_qp; |
809 | atomic_inc(&real_qp->usecnt); | |
810 | qp->device = real_qp->device; | |
811 | qp->event_handler = event_handler; | |
812 | qp->qp_context = qp_context; | |
813 | qp->qp_num = real_qp->qp_num; | |
814 | qp->qp_type = real_qp->qp_type; | |
815 | ||
816 | spin_lock_irqsave(&real_qp->device->event_handler_lock, flags); | |
817 | list_add(&qp->open_list, &real_qp->open_list); | |
818 | spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); | |
819 | ||
820 | return qp; | |
821 | } | |
822 | ||
823 | struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, | |
824 | struct ib_qp_open_attr *qp_open_attr) | |
825 | { | |
826 | struct ib_qp *qp, *real_qp; | |
827 | ||
828 | if (qp_open_attr->qp_type != IB_QPT_XRC_TGT) | |
829 | return ERR_PTR(-EINVAL); | |
830 | ||
831 | qp = ERR_PTR(-EINVAL); | |
d3d72d90 | 832 | mutex_lock(&xrcd->tgt_qp_mutex); |
0e0ec7e0 SH |
833 | list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) { |
834 | if (real_qp->qp_num == qp_open_attr->qp_num) { | |
835 | qp = __ib_open_qp(real_qp, qp_open_attr->event_handler, | |
836 | qp_open_attr->qp_context); | |
837 | break; | |
838 | } | |
839 | } | |
d3d72d90 | 840 | mutex_unlock(&xrcd->tgt_qp_mutex); |
0e0ec7e0 | 841 | return qp; |
d3d72d90 | 842 | } |
0e0ec7e0 | 843 | EXPORT_SYMBOL(ib_open_qp); |
d3d72d90 | 844 | |
04c41bf3 CH |
845 | static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp, |
846 | struct ib_qp_init_attr *qp_init_attr) | |
847 | { | |
848 | struct ib_qp *real_qp = qp; | |
849 | ||
850 | qp->event_handler = __ib_shared_qp_event_handler; | |
851 | qp->qp_context = qp; | |
852 | qp->pd = NULL; | |
853 | qp->send_cq = qp->recv_cq = NULL; | |
854 | qp->srq = NULL; | |
855 | qp->xrcd = qp_init_attr->xrcd; | |
856 | atomic_inc(&qp_init_attr->xrcd->usecnt); | |
857 | INIT_LIST_HEAD(&qp->open_list); | |
858 | ||
859 | qp = __ib_open_qp(real_qp, qp_init_attr->event_handler, | |
860 | qp_init_attr->qp_context); | |
861 | if (!IS_ERR(qp)) | |
862 | __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp); | |
863 | else | |
864 | real_qp->device->destroy_qp(real_qp); | |
865 | return qp; | |
866 | } | |
867 | ||
1da177e4 LT |
868 | struct ib_qp *ib_create_qp(struct ib_pd *pd, |
869 | struct ib_qp_init_attr *qp_init_attr) | |
870 | { | |
04c41bf3 CH |
871 | struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device; |
872 | struct ib_qp *qp; | |
a060b562 CH |
873 | int ret; |
874 | ||
a9017e23 YH |
875 | if (qp_init_attr->rwq_ind_tbl && |
876 | (qp_init_attr->recv_cq || | |
877 | qp_init_attr->srq || qp_init_attr->cap.max_recv_wr || | |
878 | qp_init_attr->cap.max_recv_sge)) | |
879 | return ERR_PTR(-EINVAL); | |
880 | ||
a060b562 CH |
881 | /* |
882 | * If the caller is using the RDMA API, calculate the resources
883 | * needed for the RDMA READ/WRITE operations. | |
884 | * | |
885 | * Note that these callers need to pass in a port number. | |
886 | */ | |
887 | if (qp_init_attr->cap.max_rdma_ctxs) | |
888 | rdma_rw_init_qp(device, qp_init_attr); | |
1da177e4 | 889 | |
2f08ee36 | 890 | qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL); |
04c41bf3 CH |
891 | if (IS_ERR(qp)) |
892 | return qp; | |
893 | ||
d291f1a6 DJ |
894 | ret = ib_create_qp_security(qp, device); |
895 | if (ret) { | |
896 | ib_destroy_qp(qp); | |
897 | return ERR_PTR(ret); | |
898 | } | |
899 | ||
04c41bf3 | 900 | qp->real_qp = qp; |
04c41bf3 | 901 | qp->qp_type = qp_init_attr->qp_type; |
a9017e23 | 902 | qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl; |
04c41bf3 CH |
903 | |
904 | atomic_set(&qp->usecnt, 0); | |
fffb0383 CH |
905 | qp->mrs_used = 0; |
906 | spin_lock_init(&qp->mr_lock); | |
a060b562 | 907 | INIT_LIST_HEAD(&qp->rdma_mrs); |
0e353e34 | 908 | INIT_LIST_HEAD(&qp->sig_mrs); |
498ca3c8 | 909 | qp->port = 0; |
fffb0383 | 910 | |
04c41bf3 CH |
911 | if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) |
912 | return ib_create_xrc_qp(qp, qp_init_attr); | |
913 | ||
914 | qp->event_handler = qp_init_attr->event_handler; | |
915 | qp->qp_context = qp_init_attr->qp_context; | |
916 | if (qp_init_attr->qp_type == IB_QPT_XRC_INI) { | |
917 | qp->recv_cq = NULL; | |
918 | qp->srq = NULL; | |
919 | } else { | |
920 | qp->recv_cq = qp_init_attr->recv_cq; | |
a9017e23 YH |
921 | if (qp_init_attr->recv_cq) |
922 | atomic_inc(&qp_init_attr->recv_cq->usecnt); | |
04c41bf3 CH |
923 | qp->srq = qp_init_attr->srq; |
924 | if (qp->srq) | |
925 | atomic_inc(&qp_init_attr->srq->usecnt); | |
1da177e4 LT |
926 | } |
927 | ||
04c41bf3 CH |
928 | qp->send_cq = qp_init_attr->send_cq; |
929 | qp->xrcd = NULL; | |
930 | ||
931 | atomic_inc(&pd->usecnt); | |
a9017e23 YH |
932 | if (qp_init_attr->send_cq) |
933 | atomic_inc(&qp_init_attr->send_cq->usecnt); | |
934 | if (qp_init_attr->rwq_ind_tbl) | |
935 | atomic_inc(&qp->rwq_ind_tbl->usecnt); | |
a060b562 CH |
936 | |
937 | if (qp_init_attr->cap.max_rdma_ctxs) { | |
938 | ret = rdma_rw_init_mrs(qp, qp_init_attr); | |
939 | if (ret) { | |
940 | pr_err("failed to init MR pool ret= %d\n", ret); | |
941 | ib_destroy_qp(qp); | |
b6bc1c73 | 942 | return ERR_PTR(ret); |
a060b562 CH |
943 | } |
944 | } | |
945 | ||
632bc3f6 BVA |
946 | /* |
947 | * Note: all hw drivers guarantee that max_send_sge is lower than | |
948 | * the device RDMA WRITE SGE limit but not all hw drivers ensure that | |
949 | * max_send_sge <= max_sge_rd. | |
950 | */ | |
951 | qp->max_write_sge = qp_init_attr->cap.max_send_sge; | |
952 | qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge, | |
953 | device->attrs.max_sge_rd); | |
954 | ||
1da177e4 LT |
955 | return qp; |
956 | } | |
957 | EXPORT_SYMBOL(ib_create_qp); | |
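Hedged sketch of a kernel consumer creating an RC QP with ib_create_qp(); the CQs are assumed to exist already and the queue depths are illustrative only.

static struct ib_qp *example_create_rc_qp(struct ib_pd *pd,
					  struct ib_cq *send_cq,
					  struct ib_cq *recv_cq)
{
	struct ib_qp_init_attr init_attr = {
		.send_cq     = send_cq,
		.recv_cq     = recv_cq,
		.qp_type     = IB_QPT_RC,
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.cap = {
			.max_send_wr  = 64,
			.max_recv_wr  = 64,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
	};

	return ib_create_qp(pd, &init_attr);
}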
958 | ||
8a51866f RD |
959 | static const struct { |
960 | int valid; | |
b42b63cf SH |
961 | enum ib_qp_attr_mask req_param[IB_QPT_MAX]; |
962 | enum ib_qp_attr_mask opt_param[IB_QPT_MAX]; | |
8a51866f RD |
963 | } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { |
964 | [IB_QPS_RESET] = { | |
965 | [IB_QPS_RESET] = { .valid = 1 }, | |
8a51866f RD |
966 | [IB_QPS_INIT] = { |
967 | .valid = 1, | |
968 | .req_param = { | |
969 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | | |
970 | IB_QP_PORT | | |
971 | IB_QP_QKEY), | |
c938a616 | 972 | [IB_QPT_RAW_PACKET] = IB_QP_PORT, |
8a51866f RD |
973 | [IB_QPT_UC] = (IB_QP_PKEY_INDEX | |
974 | IB_QP_PORT | | |
975 | IB_QP_ACCESS_FLAGS), | |
976 | [IB_QPT_RC] = (IB_QP_PKEY_INDEX | | |
977 | IB_QP_PORT | | |
978 | IB_QP_ACCESS_FLAGS), | |
b42b63cf SH |
979 | [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | |
980 | IB_QP_PORT | | |
981 | IB_QP_ACCESS_FLAGS), | |
982 | [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | | |
983 | IB_QP_PORT | | |
984 | IB_QP_ACCESS_FLAGS), | |
8a51866f RD |
985 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
986 | IB_QP_QKEY), | |
987 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | | |
988 | IB_QP_QKEY), | |
989 | } | |
990 | }, | |
991 | }, | |
992 | [IB_QPS_INIT] = { | |
993 | [IB_QPS_RESET] = { .valid = 1 }, | |
994 | [IB_QPS_ERR] = { .valid = 1 }, | |
995 | [IB_QPS_INIT] = { | |
996 | .valid = 1, | |
997 | .opt_param = { | |
998 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | | |
999 | IB_QP_PORT | | |
1000 | IB_QP_QKEY), | |
1001 | [IB_QPT_UC] = (IB_QP_PKEY_INDEX | | |
1002 | IB_QP_PORT | | |
1003 | IB_QP_ACCESS_FLAGS), | |
1004 | [IB_QPT_RC] = (IB_QP_PKEY_INDEX | | |
1005 | IB_QP_PORT | | |
1006 | IB_QP_ACCESS_FLAGS), | |
b42b63cf SH |
1007 | [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | |
1008 | IB_QP_PORT | | |
1009 | IB_QP_ACCESS_FLAGS), | |
1010 | [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | | |
1011 | IB_QP_PORT | | |
1012 | IB_QP_ACCESS_FLAGS), | |
8a51866f RD |
1013 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
1014 | IB_QP_QKEY), | |
1015 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | | |
1016 | IB_QP_QKEY), | |
1017 | } | |
1018 | }, | |
1019 | [IB_QPS_RTR] = { | |
1020 | .valid = 1, | |
1021 | .req_param = { | |
1022 | [IB_QPT_UC] = (IB_QP_AV | | |
1023 | IB_QP_PATH_MTU | | |
1024 | IB_QP_DEST_QPN | | |
1025 | IB_QP_RQ_PSN), | |
1026 | [IB_QPT_RC] = (IB_QP_AV | | |
1027 | IB_QP_PATH_MTU | | |
1028 | IB_QP_DEST_QPN | | |
1029 | IB_QP_RQ_PSN | | |
1030 | IB_QP_MAX_DEST_RD_ATOMIC | | |
1031 | IB_QP_MIN_RNR_TIMER), | |
b42b63cf SH |
1032 | [IB_QPT_XRC_INI] = (IB_QP_AV | |
1033 | IB_QP_PATH_MTU | | |
1034 | IB_QP_DEST_QPN | | |
1035 | IB_QP_RQ_PSN), | |
1036 | [IB_QPT_XRC_TGT] = (IB_QP_AV | | |
1037 | IB_QP_PATH_MTU | | |
1038 | IB_QP_DEST_QPN | | |
1039 | IB_QP_RQ_PSN | | |
1040 | IB_QP_MAX_DEST_RD_ATOMIC | | |
1041 | IB_QP_MIN_RNR_TIMER), | |
8a51866f RD |
1042 | }, |
1043 | .opt_param = { | |
1044 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | | |
1045 | IB_QP_QKEY), | |
1046 | [IB_QPT_UC] = (IB_QP_ALT_PATH | | |
1047 | IB_QP_ACCESS_FLAGS | | |
1048 | IB_QP_PKEY_INDEX), | |
1049 | [IB_QPT_RC] = (IB_QP_ALT_PATH | | |
1050 | IB_QP_ACCESS_FLAGS | | |
1051 | IB_QP_PKEY_INDEX), | |
b42b63cf SH |
1052 | [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH | |
1053 | IB_QP_ACCESS_FLAGS | | |
1054 | IB_QP_PKEY_INDEX), | |
1055 | [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH | | |
1056 | IB_QP_ACCESS_FLAGS | | |
1057 | IB_QP_PKEY_INDEX), | |
8a51866f RD |
1058 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
1059 | IB_QP_QKEY), | |
1060 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | | |
1061 | IB_QP_QKEY), | |
dd5f03be | 1062 | }, |
dbf727de | 1063 | }, |
8a51866f RD |
1064 | }, |
1065 | [IB_QPS_RTR] = { | |
1066 | [IB_QPS_RESET] = { .valid = 1 }, | |
1067 | [IB_QPS_ERR] = { .valid = 1 }, | |
1068 | [IB_QPS_RTS] = { | |
1069 | .valid = 1, | |
1070 | .req_param = { | |
1071 | [IB_QPT_UD] = IB_QP_SQ_PSN, | |
1072 | [IB_QPT_UC] = IB_QP_SQ_PSN, | |
1073 | [IB_QPT_RC] = (IB_QP_TIMEOUT | | |
1074 | IB_QP_RETRY_CNT | | |
1075 | IB_QP_RNR_RETRY | | |
1076 | IB_QP_SQ_PSN | | |
1077 | IB_QP_MAX_QP_RD_ATOMIC), | |
b42b63cf SH |
1078 | [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT | |
1079 | IB_QP_RETRY_CNT | | |
1080 | IB_QP_RNR_RETRY | | |
1081 | IB_QP_SQ_PSN | | |
1082 | IB_QP_MAX_QP_RD_ATOMIC), | |
1083 | [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT | | |
1084 | IB_QP_SQ_PSN), | |
8a51866f RD |
1085 | [IB_QPT_SMI] = IB_QP_SQ_PSN, |
1086 | [IB_QPT_GSI] = IB_QP_SQ_PSN, | |
1087 | }, | |
1088 | .opt_param = { | |
1089 | [IB_QPT_UD] = (IB_QP_CUR_STATE | | |
1090 | IB_QP_QKEY), | |
1091 | [IB_QPT_UC] = (IB_QP_CUR_STATE | | |
1092 | IB_QP_ALT_PATH | | |
1093 | IB_QP_ACCESS_FLAGS | | |
1094 | IB_QP_PATH_MIG_STATE), | |
1095 | [IB_QPT_RC] = (IB_QP_CUR_STATE | | |
1096 | IB_QP_ALT_PATH | | |
1097 | IB_QP_ACCESS_FLAGS | | |
1098 | IB_QP_MIN_RNR_TIMER | | |
1099 | IB_QP_PATH_MIG_STATE), | |
b42b63cf SH |
1100 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | |
1101 | IB_QP_ALT_PATH | | |
1102 | IB_QP_ACCESS_FLAGS | | |
1103 | IB_QP_PATH_MIG_STATE), | |
1104 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | | |
1105 | IB_QP_ALT_PATH | | |
1106 | IB_QP_ACCESS_FLAGS | | |
1107 | IB_QP_MIN_RNR_TIMER | | |
1108 | IB_QP_PATH_MIG_STATE), | |
8a51866f RD |
1109 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
1110 | IB_QP_QKEY), | |
1111 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | | |
1112 | IB_QP_QKEY), | |
528e5a1b | 1113 | [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT, |
8a51866f RD |
1114 | } |
1115 | } | |
1116 | }, | |
1117 | [IB_QPS_RTS] = { | |
1118 | [IB_QPS_RESET] = { .valid = 1 }, | |
1119 | [IB_QPS_ERR] = { .valid = 1 }, | |
1120 | [IB_QPS_RTS] = { | |
1121 | .valid = 1, | |
1122 | .opt_param = { | |
1123 | [IB_QPT_UD] = (IB_QP_CUR_STATE | | |
1124 | IB_QP_QKEY), | |
4546d31d DB |
1125 | [IB_QPT_UC] = (IB_QP_CUR_STATE | |
1126 | IB_QP_ACCESS_FLAGS | | |
8a51866f RD |
1127 | IB_QP_ALT_PATH | |
1128 | IB_QP_PATH_MIG_STATE), | |
4546d31d DB |
1129 | [IB_QPT_RC] = (IB_QP_CUR_STATE | |
1130 | IB_QP_ACCESS_FLAGS | | |
8a51866f RD |
1131 | IB_QP_ALT_PATH | |
1132 | IB_QP_PATH_MIG_STATE | | |
1133 | IB_QP_MIN_RNR_TIMER), | |
b42b63cf SH |
1134 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | |
1135 | IB_QP_ACCESS_FLAGS | | |
1136 | IB_QP_ALT_PATH | | |
1137 | IB_QP_PATH_MIG_STATE), | |
1138 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | | |
1139 | IB_QP_ACCESS_FLAGS | | |
1140 | IB_QP_ALT_PATH | | |
1141 | IB_QP_PATH_MIG_STATE | | |
1142 | IB_QP_MIN_RNR_TIMER), | |
8a51866f RD |
1143 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
1144 | IB_QP_QKEY), | |
1145 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | | |
1146 | IB_QP_QKEY), | |
528e5a1b | 1147 | [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT, |
8a51866f RD |
1148 | } |
1149 | }, | |
1150 | [IB_QPS_SQD] = { | |
1151 | .valid = 1, | |
1152 | .opt_param = { | |
1153 | [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY, | |
1154 | [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY, | |
1155 | [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY, | |
b42b63cf SH |
1156 | [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
1157 | [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */ | |
8a51866f RD |
1158 | [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
1159 | [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY | |
1160 | } | |
1161 | }, | |
1162 | }, | |
1163 | [IB_QPS_SQD] = { | |
1164 | [IB_QPS_RESET] = { .valid = 1 }, | |
1165 | [IB_QPS_ERR] = { .valid = 1 }, | |
1166 | [IB_QPS_RTS] = { | |
1167 | .valid = 1, | |
1168 | .opt_param = { | |
1169 | [IB_QPT_UD] = (IB_QP_CUR_STATE | | |
1170 | IB_QP_QKEY), | |
1171 | [IB_QPT_UC] = (IB_QP_CUR_STATE | | |
1172 | IB_QP_ALT_PATH | | |
1173 | IB_QP_ACCESS_FLAGS | | |
1174 | IB_QP_PATH_MIG_STATE), | |
1175 | [IB_QPT_RC] = (IB_QP_CUR_STATE | | |
1176 | IB_QP_ALT_PATH | | |
1177 | IB_QP_ACCESS_FLAGS | | |
1178 | IB_QP_MIN_RNR_TIMER | | |
1179 | IB_QP_PATH_MIG_STATE), | |
b42b63cf SH |
1180 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | |
1181 | IB_QP_ALT_PATH | | |
1182 | IB_QP_ACCESS_FLAGS | | |
1183 | IB_QP_PATH_MIG_STATE), | |
1184 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | | |
1185 | IB_QP_ALT_PATH | | |
1186 | IB_QP_ACCESS_FLAGS | | |
1187 | IB_QP_MIN_RNR_TIMER | | |
1188 | IB_QP_PATH_MIG_STATE), | |
8a51866f RD |
1189 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
1190 | IB_QP_QKEY), | |
1191 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | | |
1192 | IB_QP_QKEY), | |
1193 | } | |
1194 | }, | |
1195 | [IB_QPS_SQD] = { | |
1196 | .valid = 1, | |
1197 | .opt_param = { | |
1198 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | | |
1199 | IB_QP_QKEY), | |
1200 | [IB_QPT_UC] = (IB_QP_AV | | |
8a51866f RD |
1201 | IB_QP_ALT_PATH | |
1202 | IB_QP_ACCESS_FLAGS | | |
1203 | IB_QP_PKEY_INDEX | | |
1204 | IB_QP_PATH_MIG_STATE), | |
1205 | [IB_QPT_RC] = (IB_QP_PORT | | |
1206 | IB_QP_AV | | |
1207 | IB_QP_TIMEOUT | | |
1208 | IB_QP_RETRY_CNT | | |
1209 | IB_QP_RNR_RETRY | | |
1210 | IB_QP_MAX_QP_RD_ATOMIC | | |
1211 | IB_QP_MAX_DEST_RD_ATOMIC | | |
8a51866f RD |
1212 | IB_QP_ALT_PATH | |
1213 | IB_QP_ACCESS_FLAGS | | |
1214 | IB_QP_PKEY_INDEX | | |
1215 | IB_QP_MIN_RNR_TIMER | | |
1216 | IB_QP_PATH_MIG_STATE), | |
b42b63cf SH |
1217 | [IB_QPT_XRC_INI] = (IB_QP_PORT | |
1218 | IB_QP_AV | | |
1219 | IB_QP_TIMEOUT | | |
1220 | IB_QP_RETRY_CNT | | |
1221 | IB_QP_RNR_RETRY | | |
1222 | IB_QP_MAX_QP_RD_ATOMIC | | |
1223 | IB_QP_ALT_PATH | | |
1224 | IB_QP_ACCESS_FLAGS | | |
1225 | IB_QP_PKEY_INDEX | | |
1226 | IB_QP_PATH_MIG_STATE), | |
1227 | [IB_QPT_XRC_TGT] = (IB_QP_PORT | | |
1228 | IB_QP_AV | | |
1229 | IB_QP_TIMEOUT | | |
1230 | IB_QP_MAX_DEST_RD_ATOMIC | | |
1231 | IB_QP_ALT_PATH | | |
1232 | IB_QP_ACCESS_FLAGS | | |
1233 | IB_QP_PKEY_INDEX | | |
1234 | IB_QP_MIN_RNR_TIMER | | |
1235 | IB_QP_PATH_MIG_STATE), | |
8a51866f RD |
1236 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
1237 | IB_QP_QKEY), | |
1238 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | | |
1239 | IB_QP_QKEY), | |
1240 | } | |
1241 | } | |
1242 | }, | |
1243 | [IB_QPS_SQE] = { | |
1244 | [IB_QPS_RESET] = { .valid = 1 }, | |
1245 | [IB_QPS_ERR] = { .valid = 1 }, | |
1246 | [IB_QPS_RTS] = { | |
1247 | .valid = 1, | |
1248 | .opt_param = { | |
1249 | [IB_QPT_UD] = (IB_QP_CUR_STATE | | |
1250 | IB_QP_QKEY), | |
1251 | [IB_QPT_UC] = (IB_QP_CUR_STATE | | |
1252 | IB_QP_ACCESS_FLAGS), | |
1253 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | | |
1254 | IB_QP_QKEY), | |
1255 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | | |
1256 | IB_QP_QKEY), | |
1257 | } | |
1258 | } | |
1259 | }, | |
1260 | [IB_QPS_ERR] = { | |
1261 | [IB_QPS_RESET] = { .valid = 1 }, | |
1262 | [IB_QPS_ERR] = { .valid = 1 } | |
1263 | } | |
1264 | }; | |
1265 | ||
19b1f540 LR |
1266 | bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, |
1267 | enum ib_qp_type type, enum ib_qp_attr_mask mask, | |
1268 | enum rdma_link_layer ll) | |
8a51866f RD |
1269 | { |
1270 | enum ib_qp_attr_mask req_param, opt_param; | |
1271 | ||
8a51866f RD |
1272 | if (mask & IB_QP_CUR_STATE && |
1273 | cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS && | |
1274 | cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE) | |
19b1f540 | 1275 | return false; |
8a51866f RD |
1276 | |
1277 | if (!qp_state_table[cur_state][next_state].valid) | |
19b1f540 | 1278 | return false; |
8a51866f RD |
1279 | |
1280 | req_param = qp_state_table[cur_state][next_state].req_param[type]; | |
1281 | opt_param = qp_state_table[cur_state][next_state].opt_param[type]; | |
1282 | ||
1283 | if ((mask & req_param) != req_param) | |
19b1f540 | 1284 | return false; |
8a51866f RD |
1285 | |
1286 | if (mask & ~(req_param | opt_param | IB_QP_STATE)) | |
19b1f540 | 1287 | return false; |
8a51866f | 1288 | |
19b1f540 | 1289 | return true; |
8a51866f RD |
1290 | } |
1291 | EXPORT_SYMBOL(ib_modify_qp_is_ok); | |
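Sketch (assumptions: simplified state bookkeeping, hypothetical function name) of how a provider driver consults the transition table via ib_modify_qp_is_ok() before programming a state change into hardware.

static int example_driver_modify_qp_check(struct ib_qp *ibqp,
					  struct ib_qp_attr *attr,
					  int attr_mask, u8 port_num,
					  enum ib_qp_state cur_state)
{
	enum ib_qp_state new_state = attr_mask & IB_QP_STATE ?
				     attr->qp_state : cur_state;
	enum rdma_link_layer ll =
		rdma_port_get_link_layer(ibqp->device, port_num);

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, ll))
		return -EINVAL;

	/* ... program the hardware transition ... */
	return 0;
}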
1292 | ||
c0348eb0 PP |
1293 | static int ib_resolve_eth_dmac(struct ib_device *device, |
1294 | struct rdma_ah_attr *ah_attr) | |
ed4c54e5 OG |
1295 | { |
1296 | int ret = 0; | |
d8966fcd | 1297 | struct ib_global_route *grh; |
ed4c54e5 | 1298 | |
d8966fcd | 1299 | if (!rdma_is_port_valid(device, rdma_ah_get_port_num(ah_attr))) |
c90ea9d8 | 1300 | return -EINVAL; |
dbf727de | 1301 | |
d8966fcd DC |
1302 | grh = rdma_ah_retrieve_grh(ah_attr); |
1303 | ||
9636a56f NO |
1304 | if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) { |
1305 | if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) { | |
1306 | __be32 addr = 0; | |
1307 | ||
1308 | memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4); | |
1309 | ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac); | |
1310 | } else { | |
1311 | ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw, | |
1312 | (char *)ah_attr->roce.dmac); | |
1313 | } | |
c90ea9d8 | 1314 | } else { |
1060f865 | 1315 | ret = ib_resolve_unicast_gid_dmac(device, ah_attr); |
ed4c54e5 | 1316 | } |
ed4c54e5 OG |
1317 | return ret; |
1318 | } | |
ed4c54e5 | 1319 | |
a512c2fb | 1320 | /** |
b96ac05a | 1321 | * IB core internal helper that performs the actual QP attribute modification.
a512c2fb | 1322 | */ |
b96ac05a PP |
1323 | static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr, |
1324 | int attr_mask, struct ib_udata *udata) | |
1da177e4 | 1325 | { |
727b7e9a | 1326 | u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; |
a512c2fb | 1327 | int ret; |
ed4c54e5 | 1328 | |
727b7e9a MD |
1329 | if (rdma_ib_or_roce(qp->device, port)) { |
1330 | if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) { | |
1331 | pr_warn("%s: %s rq_psn overflow, masking to 24 bits\n", | |
1332 | __func__, qp->device->name); | |
1333 | attr->rq_psn &= 0xffffff; | |
1334 | } | |
1335 | ||
1336 | if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) { | |
1337 | pr_warn("%s: %s sq_psn overflow, masking to 24 bits\n", | |
1338 | __func__, qp->device->name); | |
1339 | attr->sq_psn &= 0xffffff; | |
1340 | } | |
1341 | } | |
1342 | ||
498ca3c8 NO |
1343 | ret = ib_security_modify_qp(qp, attr, attr_mask, udata); |
1344 | if (!ret && (attr_mask & IB_QP_PORT)) | |
1345 | qp->port = attr->port_num; | |
1346 | ||
1347 | return ret; | |
a512c2fb | 1348 | } |
b96ac05a | 1349 | |
a6753c4d PP |
1350 | static bool is_qp_type_connected(const struct ib_qp *qp) |
1351 | { | |
1352 | return (qp->qp_type == IB_QPT_UC || | |
1353 | qp->qp_type == IB_QPT_RC || | |
1354 | qp->qp_type == IB_QPT_XRC_INI || | |
1355 | qp->qp_type == IB_QPT_XRC_TGT); | |
1356 | } | |
1357 | ||
b96ac05a PP |
1358 | /** |
1359 | * ib_modify_qp_with_udata - Modifies the attributes for the specified QP. | |
1360 | * @ib_qp: The QP to modify. | |
1361 | * @attr: On input, specifies the QP attributes to modify. On output, | |
1362 | * the current values of selected QP attributes are returned. | |
1363 | * @attr_mask: A bit-mask used to specify which attributes of the QP | |
1364 | * are being modified. | |
1365 | * @udata: pointer to user's input output buffer information
1366 | * needed by the provider driver.
1367 | * It returns 0 on success and returns appropriate error code on error. | |
1368 | */ | |
1369 | int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr, | |
1370 | int attr_mask, struct ib_udata *udata) | |
1371 | { | |
1372 | struct ib_qp *qp = ib_qp->real_qp; | |
1373 | int ret; | |
1374 | ||
f2290d6d | 1375 | if (attr_mask & IB_QP_AV && |
a6753c4d PP |
1376 | attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE && |
1377 | is_qp_type_connected(qp)) { | |
b96ac05a PP |
1378 | ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr); |
1379 | if (ret) | |
1380 | return ret; | |
1381 | } | |
1382 | return _ib_modify_qp(qp, attr, attr_mask, udata); | |
1383 | } | |
a512c2fb | 1384 | EXPORT_SYMBOL(ib_modify_qp_with_udata); |
ed4c54e5 | 1385 | |
d4186194 YS |
1386 | int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width) |
1387 | { | |
1388 | int rc; | |
1389 | u32 netdev_speed; | |
1390 | struct net_device *netdev; | |
1391 | struct ethtool_link_ksettings lksettings; | |
1392 | ||
1393 | if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET) | |
1394 | return -EINVAL; | |
1395 | ||
1396 | if (!dev->get_netdev) | |
1397 | return -EOPNOTSUPP; | |
1398 | ||
1399 | netdev = dev->get_netdev(dev, port_num); | |
1400 | if (!netdev) | |
1401 | return -ENODEV; | |
1402 | ||
1403 | rtnl_lock(); | |
1404 | rc = __ethtool_get_link_ksettings(netdev, &lksettings); | |
1405 | rtnl_unlock(); | |
1406 | ||
1407 | dev_put(netdev); | |
1408 | ||
1409 | if (!rc) { | |
1410 | netdev_speed = lksettings.base.speed; | |
1411 | } else { | |
1412 | netdev_speed = SPEED_1000; | |
1413 | pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name, | |
1414 | netdev_speed); | |
1415 | } | |
1416 | ||
1417 | if (netdev_speed <= SPEED_1000) { | |
1418 | *width = IB_WIDTH_1X; | |
1419 | *speed = IB_SPEED_SDR; | |
1420 | } else if (netdev_speed <= SPEED_10000) { | |
1421 | *width = IB_WIDTH_1X; | |
1422 | *speed = IB_SPEED_FDR10; | |
1423 | } else if (netdev_speed <= SPEED_20000) { | |
1424 | *width = IB_WIDTH_4X; | |
1425 | *speed = IB_SPEED_DDR; | |
1426 | } else if (netdev_speed <= SPEED_25000) { | |
1427 | *width = IB_WIDTH_1X; | |
1428 | *speed = IB_SPEED_EDR; | |
1429 | } else if (netdev_speed <= SPEED_40000) { | |
1430 | *width = IB_WIDTH_4X; | |
1431 | *speed = IB_SPEED_FDR10; | |
1432 | } else { | |
1433 | *width = IB_WIDTH_4X; | |
1434 | *speed = IB_SPEED_EDR; | |
1435 | } | |
1436 | ||
1437 | return 0; | |
1438 | } | |
1439 | EXPORT_SYMBOL(ib_get_eth_speed); | |
1440 | ||
a512c2fb PP |
1441 | int ib_modify_qp(struct ib_qp *qp, |
1442 | struct ib_qp_attr *qp_attr, | |
1443 | int qp_attr_mask) | |
1444 | { | |
b96ac05a | 1445 | return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); |
1da177e4 LT |
1446 | } |
1447 | EXPORT_SYMBOL(ib_modify_qp); | |
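Hedged sketch: the first leg of the usual RESET->INIT->RTR->RTS sequence done through ib_modify_qp(); the pkey index, port and access flags are examples.

static int example_qp_to_init(struct ib_qp *qp, u8 port_num)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = port_num,
		.qp_access_flags = IB_ACCESS_REMOTE_READ |
				   IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
			    IB_QP_ACCESS_FLAGS);
}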
1448 | ||
1449 | int ib_query_qp(struct ib_qp *qp, | |
1450 | struct ib_qp_attr *qp_attr, | |
1451 | int qp_attr_mask, | |
1452 | struct ib_qp_init_attr *qp_init_attr) | |
1453 | { | |
1454 | return qp->device->query_qp ? | |
0e0ec7e0 | 1455 | qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) : |
87915bf8 | 1456 | -EOPNOTSUPP; |
1da177e4 LT |
1457 | } |
1458 | EXPORT_SYMBOL(ib_query_qp); | |
1459 | ||
0e0ec7e0 SH |
1460 | int ib_close_qp(struct ib_qp *qp) |
1461 | { | |
1462 | struct ib_qp *real_qp; | |
1463 | unsigned long flags; | |
1464 | ||
1465 | real_qp = qp->real_qp; | |
1466 | if (real_qp == qp) | |
1467 | return -EINVAL; | |
1468 | ||
1469 | spin_lock_irqsave(&real_qp->device->event_handler_lock, flags); | |
1470 | list_del(&qp->open_list); | |
1471 | spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); | |
1472 | ||
1473 | atomic_dec(&real_qp->usecnt); | |
4a50881b MS |
1474 | if (qp->qp_sec) |
1475 | ib_close_shared_qp_security(qp->qp_sec); | |
0e0ec7e0 SH |
1476 | kfree(qp); |
1477 | ||
1478 | return 0; | |
1479 | } | |
1480 | EXPORT_SYMBOL(ib_close_qp); | |
1481 | ||
1482 | static int __ib_destroy_shared_qp(struct ib_qp *qp) | |
1483 | { | |
1484 | struct ib_xrcd *xrcd; | |
1485 | struct ib_qp *real_qp; | |
1486 | int ret; | |
1487 | ||
1488 | real_qp = qp->real_qp; | |
1489 | xrcd = real_qp->xrcd; | |
1490 | ||
1491 | mutex_lock(&xrcd->tgt_qp_mutex); | |
1492 | ib_close_qp(qp); | |
1493 | if (atomic_read(&real_qp->usecnt) == 0) | |
1494 | list_del(&real_qp->xrcd_list); | |
1495 | else | |
1496 | real_qp = NULL; | |
1497 | mutex_unlock(&xrcd->tgt_qp_mutex); | |
1498 | ||
1499 | if (real_qp) { | |
1500 | ret = ib_destroy_qp(real_qp); | |
1501 | if (!ret) | |
1502 | atomic_dec(&xrcd->usecnt); | |
1503 | else | |
1504 | __ib_insert_xrcd_qp(xrcd, real_qp); | |
1505 | } | |
1506 | ||
1507 | return 0; | |
1508 | } | |
1509 | ||
1da177e4 LT |
1510 | int ib_destroy_qp(struct ib_qp *qp) |
1511 | { | |
1512 | struct ib_pd *pd; | |
1513 | struct ib_cq *scq, *rcq; | |
1514 | struct ib_srq *srq; | |
a9017e23 | 1515 | struct ib_rwq_ind_table *ind_tbl; |
d291f1a6 | 1516 | struct ib_qp_security *sec; |
1da177e4 LT |
1517 | int ret; |
1518 | ||
fffb0383 CH |
1519 | WARN_ON_ONCE(qp->mrs_used > 0); |
1520 | ||
0e0ec7e0 SH |
1521 | if (atomic_read(&qp->usecnt)) |
1522 | return -EBUSY; | |
1523 | ||
1524 | if (qp->real_qp != qp) | |
1525 | return __ib_destroy_shared_qp(qp); | |
1526 | ||
b42b63cf SH |
1527 | pd = qp->pd; |
1528 | scq = qp->send_cq; | |
1529 | rcq = qp->recv_cq; | |
1530 | srq = qp->srq; | |
a9017e23 | 1531 | ind_tbl = qp->rwq_ind_tbl; |
d291f1a6 DJ |
1532 | sec = qp->qp_sec; |
1533 | if (sec) | |
1534 | ib_destroy_qp_security_begin(sec); | |
1da177e4 | 1535 | |
a060b562 CH |
1536 | if (!qp->uobject) |
1537 | rdma_rw_cleanup_mrs(qp); | |
1538 | ||
78a0cd64 | 1539 | rdma_restrack_del(&qp->res); |
1da177e4 LT |
1540 | ret = qp->device->destroy_qp(qp); |
1541 | if (!ret) { | |
b42b63cf SH |
1542 | if (pd) |
1543 | atomic_dec(&pd->usecnt); | |
1544 | if (scq) | |
1545 | atomic_dec(&scq->usecnt); | |
1546 | if (rcq) | |
1547 | atomic_dec(&rcq->usecnt); | |
1da177e4 LT |
1548 | if (srq) |
1549 | atomic_dec(&srq->usecnt); | |
a9017e23 YH |
1550 | if (ind_tbl) |
1551 | atomic_dec(&ind_tbl->usecnt); | |
d291f1a6 DJ |
1552 | if (sec) |
1553 | ib_destroy_qp_security_end(sec); | |
1554 | } else { | |
1555 | if (sec) | |
1556 | ib_destroy_qp_security_abort(sec); | |
1da177e4 LT |
1557 | } |
1558 | ||
1559 | return ret; | |
1560 | } | |
1561 | EXPORT_SYMBOL(ib_destroy_qp); | |
1562 | ||
1563 | /* Completion queues */ | |
1564 | ||
1565 | struct ib_cq *ib_create_cq(struct ib_device *device, | |
1566 | ib_comp_handler comp_handler, | |
1567 | void (*event_handler)(struct ib_event *, void *), | |
8e37210b MB |
1568 | void *cq_context, |
1569 | const struct ib_cq_init_attr *cq_attr) | |
1da177e4 LT |
1570 | { |
1571 | struct ib_cq *cq; | |
1572 | ||
8e37210b | 1573 | cq = device->create_cq(device, cq_attr, NULL, NULL); |
1da177e4 LT |
1574 | |
1575 | if (!IS_ERR(cq)) { | |
1576 | cq->device = device; | |
b5e81bf5 | 1577 | cq->uobject = NULL; |
1da177e4 LT |
1578 | cq->comp_handler = comp_handler; |
1579 | cq->event_handler = event_handler; | |
1580 | cq->cq_context = cq_context; | |
1581 | atomic_set(&cq->usecnt, 0); | |
08f294a1 LR |
1582 | cq->res.type = RDMA_RESTRACK_CQ; |
1583 | rdma_restrack_add(&cq->res); | |
1da177e4 LT |
1584 | } |
1585 | ||
1586 | return cq; | |
1587 | } | |
1588 | EXPORT_SYMBOL(ib_create_cq); | |
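
/*
 * A minimal usage sketch for ib_create_cq(): the completion handler, the
 * assumed CQ depth and the wrapper names are illustrative, not values taken
 * from this file.
 */
static void example_comp_handler(struct ib_cq *cq, void *cq_context)
{
	/* a real handler would typically schedule polling of @cq here */
}

static struct ib_cq *example_create_cq(struct ib_device *device)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe	     = 128,	/* assumed CQ depth */
		.comp_vector = 0,
	};

	return ib_create_cq(device, example_comp_handler, NULL, NULL,
			    &cq_attr);
}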
1589 | ||
4190b4e9 | 1590 | int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period) |
2dd57162 EC |
1591 | { |
1592 | return cq->device->modify_cq ? | |
87915bf8 | 1593 | cq->device->modify_cq(cq, cq_count, cq_period) : -EOPNOTSUPP; |
2dd57162 | 1594 | } |
4190b4e9 | 1595 | EXPORT_SYMBOL(rdma_set_cq_moderation); |
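
/*
 * A minimal usage sketch for rdma_set_cq_moderation(): the cq_count and
 * cq_period values of 16 and 10 are illustrative assumptions.  Drivers
 * without ->modify_cq() make this return -EOPNOTSUPP.
 */
static int example_moderate_cq(struct ib_cq *cq)
{
	return rdma_set_cq_moderation(cq, 16, 10);
}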
2dd57162 | 1596 | |
1da177e4 LT |
1597 | int ib_destroy_cq(struct ib_cq *cq) |
1598 | { | |
1599 | if (atomic_read(&cq->usecnt)) | |
1600 | return -EBUSY; | |
1601 | ||
08f294a1 | 1602 | rdma_restrack_del(&cq->res); |
1da177e4 LT |
1603 | return cq->device->destroy_cq(cq); |
1604 | } | |
1605 | EXPORT_SYMBOL(ib_destroy_cq); | |
1606 | ||
a74cd4af | 1607 | int ib_resize_cq(struct ib_cq *cq, int cqe) |
1da177e4 | 1608 | { |
40de2e54 | 1609 | return cq->device->resize_cq ? |
87915bf8 | 1610 | cq->device->resize_cq(cq, cqe, NULL) : -EOPNOTSUPP; |
1da177e4 LT |
1611 | } |
1612 | EXPORT_SYMBOL(ib_resize_cq); | |
1613 | ||
1614 | /* Memory regions */ | |
1615 | ||
1da177e4 LT |
1616 | int ib_dereg_mr(struct ib_mr *mr) |
1617 | { | |
ab67ed8d | 1618 | struct ib_pd *pd = mr->pd; |
be934cca | 1619 | struct ib_dm *dm = mr->dm; |
1da177e4 LT |
1620 | int ret; |
1621 | ||
fccec5b8 | 1622 | rdma_restrack_del(&mr->res); |
1da177e4 | 1623 | ret = mr->device->dereg_mr(mr); |
be934cca | 1624 | if (!ret) { |
1da177e4 | 1625 | atomic_dec(&pd->usecnt); |
be934cca AL |
1626 | if (dm) |
1627 | atomic_dec(&dm->usecnt); | |
1628 | } | |
1da177e4 LT |
1629 | |
1630 | return ret; | |
1631 | } | |
1632 | EXPORT_SYMBOL(ib_dereg_mr); | |
1633 | ||
9bee178b SG |
1634 | /** |
1635 | * ib_alloc_mr() - Allocates a memory region | |
1636 | * @pd: protection domain associated with the region | |
1637 | * @mr_type: memory region type | |
1638 | * @max_num_sg: maximum sg entries available for registration. | |
1639 | * | |
1640 | * Notes: | |
1641 | * Memory registration page/sg lists must not exceed max_num_sg. | 
1642 | * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed | |
1643 | * max_num_sg * used_page_size. | |
1644 | * | |
1645 | */ | |
1646 | struct ib_mr *ib_alloc_mr(struct ib_pd *pd, | |
1647 | enum ib_mr_type mr_type, | |
1648 | u32 max_num_sg) | |
00f7ec36 SW |
1649 | { |
1650 | struct ib_mr *mr; | |
1651 | ||
d9f272c5 | 1652 | if (!pd->device->alloc_mr) |
87915bf8 | 1653 | return ERR_PTR(-EOPNOTSUPP); |
00f7ec36 | 1654 | |
d9f272c5 | 1655 | mr = pd->device->alloc_mr(pd, mr_type, max_num_sg); |
00f7ec36 SW |
1656 | if (!IS_ERR(mr)) { |
1657 | mr->device = pd->device; | |
1658 | mr->pd = pd; | |
54e7e48b | 1659 | mr->dm = NULL; |
00f7ec36 SW |
1660 | mr->uobject = NULL; |
1661 | atomic_inc(&pd->usecnt); | |
d4a85c30 | 1662 | mr->need_inval = false; |
fccec5b8 SW |
1663 | mr->res.type = RDMA_RESTRACK_MR; |
1664 | rdma_restrack_add(&mr->res); | |
00f7ec36 SW |
1665 | } |
1666 | ||
1667 | return mr; | |
1668 | } | |
d9f272c5 | 1669 | EXPORT_SYMBOL(ib_alloc_mr); |
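
/*
 * A minimal usage sketch for ib_alloc_mr(): allocating a fast-registration
 * MR with room for 16 sg entries (an illustrative budget).  The MR is later
 * filled with ib_map_mr_sg() and released with ib_dereg_mr().
 */
static struct ib_mr *example_alloc_reg_mr(struct ib_pd *pd)
{
	struct ib_mr *mr;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
	if (IS_ERR(mr))
		return mr;

	/* ...later: ib_map_mr_sg() and a registration work request... */
	return mr;
}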
00f7ec36 | 1670 | |
1da177e4 LT |
1671 | /* "Fast" memory regions */ |
1672 | ||
1673 | struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, | |
1674 | int mr_access_flags, | |
1675 | struct ib_fmr_attr *fmr_attr) | |
1676 | { | |
1677 | struct ib_fmr *fmr; | |
1678 | ||
1679 | if (!pd->device->alloc_fmr) | |
87915bf8 | 1680 | return ERR_PTR(-EOPNOTSUPP); |
1da177e4 LT |
1681 | |
1682 | fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr); | |
1683 | if (!IS_ERR(fmr)) { | |
1684 | fmr->device = pd->device; | |
1685 | fmr->pd = pd; | |
1686 | atomic_inc(&pd->usecnt); | |
1687 | } | |
1688 | ||
1689 | return fmr; | |
1690 | } | |
1691 | EXPORT_SYMBOL(ib_alloc_fmr); | |
1692 | ||
1693 | int ib_unmap_fmr(struct list_head *fmr_list) | |
1694 | { | |
1695 | struct ib_fmr *fmr; | |
1696 | ||
1697 | if (list_empty(fmr_list)) | |
1698 | return 0; | |
1699 | ||
1700 | fmr = list_entry(fmr_list->next, struct ib_fmr, list); | |
1701 | return fmr->device->unmap_fmr(fmr_list); | |
1702 | } | |
1703 | EXPORT_SYMBOL(ib_unmap_fmr); | |
1704 | ||
1705 | int ib_dealloc_fmr(struct ib_fmr *fmr) | |
1706 | { | |
1707 | struct ib_pd *pd; | |
1708 | int ret; | |
1709 | ||
1710 | pd = fmr->pd; | |
1711 | ret = fmr->device->dealloc_fmr(fmr); | |
1712 | if (!ret) | |
1713 | atomic_dec(&pd->usecnt); | |
1714 | ||
1715 | return ret; | |
1716 | } | |
1717 | EXPORT_SYMBOL(ib_dealloc_fmr); | |
1718 | ||
1719 | /* Multicast groups */ | |
1720 | ||
52363335 NO |
1721 | static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid) |
1722 | { | |
1723 | struct ib_qp_init_attr init_attr = {}; | |
1724 | struct ib_qp_attr attr = {}; | |
1725 | int num_eth_ports = 0; | |
1726 | int port; | |
1727 | ||
1728 | /* If QP state >= init, it is assigned to a port and we can check this | |
1729 | * port only. | |
1730 | */ | |
1731 | if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) { | |
1732 | if (attr.qp_state >= IB_QPS_INIT) { | |
e6f9bc34 | 1733 | if (rdma_port_get_link_layer(qp->device, attr.port_num) != |
52363335 NO |
1734 | IB_LINK_LAYER_INFINIBAND) |
1735 | return true; | |
1736 | goto lid_check; | |
1737 | } | |
1738 | } | |
1739 | ||
1740 | /* Can't get a quick answer, iterate over all ports */ | |
1741 | for (port = 0; port < qp->device->phys_port_cnt; port++) | |
e6f9bc34 | 1742 | if (rdma_port_get_link_layer(qp->device, port) != |
52363335 NO |
1743 | IB_LINK_LAYER_INFINIBAND) |
1744 | num_eth_ports++; | |
1745 | ||
1746 | /* If we have at least one Ethernet port, the RoCE annex declares that | 
1747 | * the multicast LID should be ignored. We can't tell at this step if the | 
1748 | * QP belongs to an IB or Ethernet port. | |
1749 | */ | |
1750 | if (num_eth_ports) | |
1751 | return true; | |
1752 | ||
1753 | /* If all the ports are IB, we can check according to IB spec. */ | |
1754 | lid_check: | |
1755 | return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) || | |
1756 | lid == be16_to_cpu(IB_LID_PERMISSIVE)); | |
1757 | } | |
1758 | ||
1da177e4 LT |
1759 | int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) |
1760 | { | |
c3bccbfb OG |
1761 | int ret; |
1762 | ||
0c33aeed | 1763 | if (!qp->device->attach_mcast) |
87915bf8 | 1764 | return -EOPNOTSUPP; |
be1d325a NO |
1765 | |
1766 | if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) || | |
1767 | qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) | |
0c33aeed JM |
1768 | return -EINVAL; |
1769 | ||
c3bccbfb OG |
1770 | ret = qp->device->attach_mcast(qp, gid, lid); |
1771 | if (!ret) | |
1772 | atomic_inc(&qp->usecnt); | |
1773 | return ret; | |
1da177e4 LT |
1774 | } |
1775 | EXPORT_SYMBOL(ib_attach_mcast); | |
1776 | ||
1777 | int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) | |
1778 | { | |
c3bccbfb OG |
1779 | int ret; |
1780 | ||
0c33aeed | 1781 | if (!qp->device->detach_mcast) |
87915bf8 | 1782 | return -EOPNOTSUPP; |
be1d325a NO |
1783 | |
1784 | if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) || | |
1785 | qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) | |
0c33aeed JM |
1786 | return -EINVAL; |
1787 | ||
c3bccbfb OG |
1788 | ret = qp->device->detach_mcast(qp, gid, lid); |
1789 | if (!ret) | |
1790 | atomic_dec(&qp->usecnt); | |
1791 | return ret; | |
1da177e4 LT |
1792 | } |
1793 | EXPORT_SYMBOL(ib_detach_mcast); | |
59991f94 | 1794 | |
f66c8ba4 | 1795 | struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller) |
59991f94 SH |
1796 | { |
1797 | struct ib_xrcd *xrcd; | |
1798 | ||
1799 | if (!device->alloc_xrcd) | |
87915bf8 | 1800 | return ERR_PTR(-EOPNOTSUPP); |
59991f94 SH |
1801 | |
1802 | xrcd = device->alloc_xrcd(device, NULL, NULL); | |
1803 | if (!IS_ERR(xrcd)) { | |
1804 | xrcd->device = device; | |
53d0bd1e | 1805 | xrcd->inode = NULL; |
59991f94 | 1806 | atomic_set(&xrcd->usecnt, 0); |
d3d72d90 SH |
1807 | mutex_init(&xrcd->tgt_qp_mutex); |
1808 | INIT_LIST_HEAD(&xrcd->tgt_qp_list); | |
59991f94 SH |
1809 | } |
1810 | ||
1811 | return xrcd; | |
1812 | } | |
f66c8ba4 | 1813 | EXPORT_SYMBOL(__ib_alloc_xrcd); |
59991f94 SH |
1814 | |
1815 | int ib_dealloc_xrcd(struct ib_xrcd *xrcd) | |
1816 | { | |
d3d72d90 SH |
1817 | struct ib_qp *qp; |
1818 | int ret; | |
1819 | ||
59991f94 SH |
1820 | if (atomic_read(&xrcd->usecnt)) |
1821 | return -EBUSY; | |
1822 | ||
d3d72d90 SH |
1823 | while (!list_empty(&xrcd->tgt_qp_list)) { |
1824 | qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list); | |
1825 | ret = ib_destroy_qp(qp); | |
1826 | if (ret) | |
1827 | return ret; | |
1828 | } | |
1829 | ||
59991f94 SH |
1830 | return xrcd->device->dealloc_xrcd(xrcd); |
1831 | } | |
1832 | EXPORT_SYMBOL(ib_dealloc_xrcd); | |
319a441d | 1833 | |
5fd251c8 YH |
1834 | /** |
1835 | * ib_create_wq - Creates a WQ associated with the specified protection | |
1836 | * domain. | |
1837 | * @pd: The protection domain associated with the WQ. | |
1f58621e | 1838 | * @wq_attr: A list of initial attributes required to create the |
5fd251c8 YH |
1839 | * WQ. If WQ creation succeeds, then the attributes are updated to |
1840 | * the actual capabilities of the created WQ. | |
1841 | * | |
1f58621e | 1842 | * wq_attr->max_wr and wq_attr->max_sge determine |
5fd251c8 YH |
1843 | * the requested size of the WQ, and are set to the actual values allocated | 
1844 | * on return. | |
1845 | * If ib_create_wq() succeeds, then max_wr and max_sge will always be | |
1846 | * at least as large as the requested values. | |
1847 | */ | |
1848 | struct ib_wq *ib_create_wq(struct ib_pd *pd, | |
1849 | struct ib_wq_init_attr *wq_attr) | |
1850 | { | |
1851 | struct ib_wq *wq; | |
1852 | ||
1853 | if (!pd->device->create_wq) | |
87915bf8 | 1854 | return ERR_PTR(-EOPNOTSUPP); |
5fd251c8 YH |
1855 | |
1856 | wq = pd->device->create_wq(pd, wq_attr, NULL); | |
1857 | if (!IS_ERR(wq)) { | |
1858 | wq->event_handler = wq_attr->event_handler; | |
1859 | wq->wq_context = wq_attr->wq_context; | |
1860 | wq->wq_type = wq_attr->wq_type; | |
1861 | wq->cq = wq_attr->cq; | |
1862 | wq->device = pd->device; | |
1863 | wq->pd = pd; | |
1864 | wq->uobject = NULL; | |
1865 | atomic_inc(&pd->usecnt); | |
1866 | atomic_inc(&wq_attr->cq->usecnt); | |
1867 | atomic_set(&wq->usecnt, 0); | |
1868 | } | |
1869 | return wq; | |
1870 | } | |
1871 | EXPORT_SYMBOL(ib_create_wq); | |
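
/*
 * A minimal usage sketch for ib_create_wq(): a receive WQ bound to an
 * existing CQ.  The IB_WQT_RQ type and the queue sizes are illustrative
 * assumptions about the caller, not values taken from this file.
 */
static struct ib_wq *example_create_rq(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_wq_init_attr wq_attr = {
		.wq_type = IB_WQT_RQ,
		.max_wr	 = 256,	/* assumed receive depth */
		.max_sge = 1,
		.cq	 = cq,
	};

	return ib_create_wq(pd, &wq_attr);
}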
1872 | ||
1873 | /** | |
1874 | * ib_destroy_wq - Destroys the specified WQ. | |
1875 | * @wq: The WQ to destroy. | |
1876 | */ | |
1877 | int ib_destroy_wq(struct ib_wq *wq) | |
1878 | { | |
1879 | int err; | |
1880 | struct ib_cq *cq = wq->cq; | |
1881 | struct ib_pd *pd = wq->pd; | |
1882 | ||
1883 | if (atomic_read(&wq->usecnt)) | |
1884 | return -EBUSY; | |
1885 | ||
1886 | err = wq->device->destroy_wq(wq); | |
1887 | if (!err) { | |
1888 | atomic_dec(&pd->usecnt); | |
1889 | atomic_dec(&cq->usecnt); | |
1890 | } | |
1891 | return err; | |
1892 | } | |
1893 | EXPORT_SYMBOL(ib_destroy_wq); | |
1894 | ||
1895 | /** | |
1896 | * ib_modify_wq - Modifies the specified WQ. | |
1897 | * @wq: The WQ to modify. | |
1898 | * @wq_attr: On input, specifies the WQ attributes to modify. | |
1899 | * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ | |
1900 | * are being modified. | |
1901 | * On output, the current values of selected WQ attributes are returned. | |
1902 | */ | |
1903 | int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, | |
1904 | u32 wq_attr_mask) | |
1905 | { | |
1906 | int err; | |
1907 | ||
1908 | if (!wq->device->modify_wq) | |
87915bf8 | 1909 | return -EOPNOTSUPP; |
5fd251c8 YH |
1910 | |
1911 | err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL); | |
1912 | return err; | |
1913 | } | |
1914 | EXPORT_SYMBOL(ib_modify_wq); | |
1915 | ||
6d39786b YH |
1916 | /* |
1917 | * ib_create_rwq_ind_table - Creates a RQ Indirection Table. | |
1918 | * @device: The device on which to create the rwq indirection table. | |
1919 | * @ib_rwq_ind_table_init_attr: A list of initial attributes required to | |
1920 | * create the Indirection Table. | |
1921 | * | |
1922 | * Note: The lifetime of ib_rwq_ind_table_init_attr->ind_tbl must not be | 
1923 | * shorter than that of the created ib_rwq_ind_table object; the caller is | 
1924 | * responsible for its memory allocation/free. | 
1925 | */ | |
1926 | struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, | |
1927 | struct ib_rwq_ind_table_init_attr *init_attr) | |
1928 | { | |
1929 | struct ib_rwq_ind_table *rwq_ind_table; | |
1930 | int i; | |
1931 | u32 table_size; | |
1932 | ||
1933 | if (!device->create_rwq_ind_table) | |
87915bf8 | 1934 | return ERR_PTR(-EOPNOTSUPP); |
6d39786b YH |
1935 | |
1936 | table_size = (1 << init_attr->log_ind_tbl_size); | |
1937 | rwq_ind_table = device->create_rwq_ind_table(device, | |
1938 | init_attr, NULL); | |
1939 | if (IS_ERR(rwq_ind_table)) | |
1940 | return rwq_ind_table; | |
1941 | ||
1942 | rwq_ind_table->ind_tbl = init_attr->ind_tbl; | |
1943 | rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size; | |
1944 | rwq_ind_table->device = device; | |
1945 | rwq_ind_table->uobject = NULL; | |
1946 | atomic_set(&rwq_ind_table->usecnt, 0); | |
1947 | ||
1948 | for (i = 0; i < table_size; i++) | |
1949 | atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt); | |
1950 | ||
1951 | return rwq_ind_table; | |
1952 | } | |
1953 | EXPORT_SYMBOL(ib_create_rwq_ind_table); | |
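
/*
 * A minimal usage sketch for ib_create_rwq_ind_table(): a two-entry table
 * built from caller-owned WQ pointers, per the lifetime note above.  The
 * table size is an illustrative assumption.
 */
static struct ib_rwq_ind_table *
example_create_ind_table(struct ib_device *device, struct ib_wq **wqs)
{
	struct ib_rwq_ind_table_init_attr init_attr = {
		.log_ind_tbl_size = 1,		/* 1 << 1 == 2 entries */
		.ind_tbl	  = wqs,	/* must outlive the table */
	};

	return ib_create_rwq_ind_table(device, &init_attr);
}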
1954 | ||
1955 | /* | |
1956 | * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table. | |
1957 | * @wq_ind_table: The Indirection Table to destroy. | |
1958 | */ | |
1959 | int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table) | |
1960 | { | |
1961 | int err, i; | |
1962 | u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size); | |
1963 | struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl; | |
1964 | ||
1965 | if (atomic_read(&rwq_ind_table->usecnt)) | |
1966 | return -EBUSY; | |
1967 | ||
1968 | err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table); | |
1969 | if (!err) { | |
1970 | for (i = 0; i < table_size; i++) | |
1971 | atomic_dec(&ind_tbl[i]->usecnt); | |
1972 | } | |
1973 | ||
1974 | return err; | |
1975 | } | |
1976 | EXPORT_SYMBOL(ib_destroy_rwq_ind_table); | |
1977 | ||
319a441d HHZ |
1978 | struct ib_flow *ib_create_flow(struct ib_qp *qp, |
1979 | struct ib_flow_attr *flow_attr, | |
1980 | int domain) | |
1981 | { | |
1982 | struct ib_flow *flow_id; | |
1983 | if (!qp->device->create_flow) | |
87915bf8 | 1984 | return ERR_PTR(-EOPNOTSUPP); |
319a441d HHZ |
1985 | |
1986 | flow_id = qp->device->create_flow(qp, flow_attr, domain); | |
8ecc7985 | 1987 | if (!IS_ERR(flow_id)) { |
319a441d | 1988 | atomic_inc(&qp->usecnt); |
8ecc7985 MB |
1989 | flow_id->qp = qp; |
1990 | } | |
319a441d HHZ |
1991 | return flow_id; |
1992 | } | |
1993 | EXPORT_SYMBOL(ib_create_flow); | |
1994 | ||
1995 | int ib_destroy_flow(struct ib_flow *flow_id) | |
1996 | { | |
1997 | int err; | |
1998 | struct ib_qp *qp = flow_id->qp; | |
1999 | ||
2000 | err = qp->device->destroy_flow(flow_id); | |
2001 | if (!err) | |
2002 | atomic_dec(&qp->usecnt); | |
2003 | return err; | |
2004 | } | |
2005 | EXPORT_SYMBOL(ib_destroy_flow); | |
1b01d335 SG |
2006 | |
2007 | int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, | |
2008 | struct ib_mr_status *mr_status) | |
2009 | { | |
2010 | return mr->device->check_mr_status ? | |
87915bf8 | 2011 | mr->device->check_mr_status(mr, check_mask, mr_status) : -EOPNOTSUPP; |
1b01d335 SG |
2012 | } |
2013 | EXPORT_SYMBOL(ib_check_mr_status); | |
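
/*
 * A minimal usage sketch for ib_check_mr_status(): querying signature
 * status after I/O completes, assuming the IB_MR_CHECK_SIG_STATUS mask and
 * the fail_status field as defined in ib_verbs.h.
 */
static bool example_mr_sig_error(struct ib_mr *mr)
{
	struct ib_mr_status mr_status;

	if (ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status))
		return false;

	return mr_status.fail_status & IB_MR_CHECK_SIG_STATUS;
}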
4c67e2bf | 2014 | |
50174a7f EC |
2015 | int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, |
2016 | int state) | |
2017 | { | |
2018 | if (!device->set_vf_link_state) | |
87915bf8 | 2019 | return -EOPNOTSUPP; |
50174a7f EC |
2020 | |
2021 | return device->set_vf_link_state(device, vf, port, state); | |
2022 | } | |
2023 | EXPORT_SYMBOL(ib_set_vf_link_state); | |
2024 | ||
2025 | int ib_get_vf_config(struct ib_device *device, int vf, u8 port, | |
2026 | struct ifla_vf_info *info) | |
2027 | { | |
2028 | if (!device->get_vf_config) | |
87915bf8 | 2029 | return -EOPNOTSUPP; |
50174a7f EC |
2030 | |
2031 | return device->get_vf_config(device, vf, port, info); | |
2032 | } | |
2033 | EXPORT_SYMBOL(ib_get_vf_config); | |
2034 | ||
2035 | int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, | |
2036 | struct ifla_vf_stats *stats) | |
2037 | { | |
2038 | if (!device->get_vf_stats) | |
87915bf8 | 2039 | return -EOPNOTSUPP; |
50174a7f EC |
2040 | |
2041 | return device->get_vf_stats(device, vf, port, stats); | |
2042 | } | |
2043 | EXPORT_SYMBOL(ib_get_vf_stats); | |
2044 | ||
2045 | int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, | |
2046 | int type) | |
2047 | { | |
2048 | if (!device->set_vf_guid) | |
87915bf8 | 2049 | return -EOPNOTSUPP; |
50174a7f EC |
2050 | |
2051 | return device->set_vf_guid(device, vf, port, guid, type); | |
2052 | } | |
2053 | EXPORT_SYMBOL(ib_set_vf_guid); | |
2054 | ||
4c67e2bf SG |
2055 | /** |
2056 | * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list | |
2057 | * and set it as the memory region. | 
2058 | * @mr: memory region | |
2059 | * @sg: dma mapped scatterlist | |
2060 | * @sg_nents: number of entries in sg | |
ff2ba993 | 2061 | * @sg_offset: offset in bytes into sg |
4c67e2bf SG |
2062 | * @page_size: page vector desired page size |
2063 | * | |
2064 | * Constraints: | |
2065 | * - The first sg element is allowed to have an offset. | |
52746129 BVA |
2066 | * - Each sg element must either be aligned to page_size or virtually |
2067 | * contiguous to the previous element. In case an sg element has a | |
2068 | * non-contiguous offset, the mapping prefix will not include it. | |
4c67e2bf SG |
2069 | * - The last sg element is allowed to have length less than page_size. |
2070 | * - If the total byte length of sg_nents exceeds max_num_sg * page_size, | 
2071 | * then only max_num_sg entries will be mapped. | 
52746129 | 2072 | * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these |
f5aa9159 | 2073 | * constraints holds and the page_size argument is ignored. |
4c67e2bf SG |
2074 | * |
2075 | * Returns the number of sg elements that were mapped to the memory region. | |
2076 | * | |
2077 | * After this completes successfully, the memory region | |
2078 | * is ready for registration. | |
2079 | */ | |
ff2ba993 | 2080 | int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, |
9aa8b321 | 2081 | unsigned int *sg_offset, unsigned int page_size) |
4c67e2bf SG |
2082 | { |
2083 | if (unlikely(!mr->device->map_mr_sg)) | |
87915bf8 | 2084 | return -EOPNOTSUPP; |
4c67e2bf SG |
2085 | |
2086 | mr->page_size = page_size; | |
2087 | ||
ff2ba993 | 2088 | return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset); |
4c67e2bf SG |
2089 | } |
2090 | EXPORT_SYMBOL(ib_map_mr_sg); | |
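
/*
 * A minimal usage sketch for ib_map_mr_sg(): mapping an already DMA-mapped
 * scatterlist with PAGE_SIZE pages and treating a short mapping (caused by
 * a gap) as an error.  The wrapper name and that policy are illustrative
 * assumptions.
 */
static int example_map_mr(struct ib_mr *mr, struct scatterlist *sg,
			  int sg_nents)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < 0)
		return n;

	return n == sg_nents ? 0 : -EINVAL;
}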
2091 | ||
2092 | /** | |
2093 | * ib_sg_to_pages() - Convert the largest prefix of a sg list | |
2094 | * to a page vector | |
2095 | * @mr: memory region | |
2096 | * @sgl: dma mapped scatterlist | |
2097 | * @sg_nents: number of entries in sg | |
9aa8b321 BVA |
2098 | * @sg_offset_p: IN: start offset in bytes into sg |
2099 | * OUT: offset in bytes for element n of the sg of the first | |
2100 | * byte that has not been processed where n is the return | |
2101 | * value of this function. | |
4c67e2bf SG |
2102 | * @set_page: driver page assignment function pointer |
2103 | * | |
8f5ba10e | 2104 | * Core service helper for drivers to convert the largest |
4c67e2bf SG |
2105 | * prefix of given sg list to a page vector. The sg list |
2106 | * prefix converted is the prefix that meet the requirements | |
2107 | * of ib_map_mr_sg. | |
2108 | * | |
2109 | * Returns the number of sg elements that were assigned to | |
2110 | * a page vector. | |
2111 | */ | |
ff2ba993 | 2112 | int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, |
9aa8b321 | 2113 | unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64)) |
4c67e2bf SG |
2114 | { |
2115 | struct scatterlist *sg; | |
b6aeb980 | 2116 | u64 last_end_dma_addr = 0; |
9aa8b321 | 2117 | unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0; |
4c67e2bf SG |
2118 | unsigned int last_page_off = 0; |
2119 | u64 page_mask = ~((u64)mr->page_size - 1); | |
8f5ba10e | 2120 | int i, ret; |
4c67e2bf | 2121 | |
9aa8b321 BVA |
2122 | if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0]))) |
2123 | return -EINVAL; | |
2124 | ||
ff2ba993 | 2125 | mr->iova = sg_dma_address(&sgl[0]) + sg_offset; |
4c67e2bf SG |
2126 | mr->length = 0; |
2127 | ||
2128 | for_each_sg(sgl, sg, sg_nents, i) { | |
ff2ba993 | 2129 | u64 dma_addr = sg_dma_address(sg) + sg_offset; |
9aa8b321 | 2130 | u64 prev_addr = dma_addr; |
ff2ba993 | 2131 | unsigned int dma_len = sg_dma_len(sg) - sg_offset; |
4c67e2bf SG |
2132 | u64 end_dma_addr = dma_addr + dma_len; |
2133 | u64 page_addr = dma_addr & page_mask; | |
2134 | ||
8f5ba10e BVA |
2135 | /* |
2136 | * For the second and later elements, check whether either the | |
2137 | * end of element i-1 or the start of element i is not aligned | |
2138 | * on a page boundary. | |
2139 | */ | |
2140 | if (i && (last_page_off != 0 || page_addr != dma_addr)) { | |
2141 | /* Stop mapping if there is a gap. */ | |
2142 | if (last_end_dma_addr != dma_addr) | |
2143 | break; | |
2144 | ||
2145 | /* | |
2146 | * Coalesce this element with the last. If it is small | |
2147 | * enough just update mr->length. Otherwise start | |
2148 | * mapping from the next page. | |
2149 | */ | |
2150 | goto next_page; | |
4c67e2bf SG |
2151 | } |
2152 | ||
2153 | do { | |
8f5ba10e | 2154 | ret = set_page(mr, page_addr); |
9aa8b321 BVA |
2155 | if (unlikely(ret < 0)) { |
2156 | sg_offset = prev_addr - sg_dma_address(sg); | |
2157 | mr->length += prev_addr - dma_addr; | |
2158 | if (sg_offset_p) | |
2159 | *sg_offset_p = sg_offset; | |
2160 | return i || sg_offset ? i : ret; | |
2161 | } | |
2162 | prev_addr = page_addr; | |
8f5ba10e | 2163 | next_page: |
4c67e2bf SG |
2164 | page_addr += mr->page_size; |
2165 | } while (page_addr < end_dma_addr); | |
2166 | ||
2167 | mr->length += dma_len; | |
2168 | last_end_dma_addr = end_dma_addr; | |
4c67e2bf | 2169 | last_page_off = end_dma_addr & ~page_mask; |
ff2ba993 CH |
2170 | |
2171 | sg_offset = 0; | |
4c67e2bf SG |
2172 | } |
2173 | ||
9aa8b321 BVA |
2174 | if (sg_offset_p) |
2175 | *sg_offset_p = 0; | |
4c67e2bf SG |
2176 | return i; |
2177 | } | |
2178 | EXPORT_SYMBOL(ib_sg_to_pages); | |
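
/*
 * A minimal sketch of how a driver's ->map_mr_sg() hook typically wraps
 * ib_sg_to_pages().  The example_mr container and its fixed page array are
 * illustrative assumptions, not a real driver's layout.
 */
struct example_mr {
	struct ib_mr	ibmr;
	u64		pages[16];
	int		npages;
};

static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct example_mr *emr = container_of(ibmr, struct example_mr, ibmr);

	if (emr->npages >= ARRAY_SIZE(emr->pages))
		return -ENOMEM;

	emr->pages[emr->npages++] = addr;
	return 0;
}

static int example_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			     int sg_nents, unsigned int *sg_offset)
{
	struct example_mr *emr = container_of(ibmr, struct example_mr, ibmr);

	emr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, example_set_page);
}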
765d6774 SW |
2179 | |
2180 | struct ib_drain_cqe { | |
2181 | struct ib_cqe cqe; | |
2182 | struct completion done; | |
2183 | }; | |
2184 | ||
2185 | static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc) | |
2186 | { | |
2187 | struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe, | |
2188 | cqe); | |
2189 | ||
2190 | complete(&cqe->done); | |
2191 | } | |
2192 | ||
2193 | /* | |
2194 | * Post a WR and block until its completion is reaped for the SQ. | |
2195 | */ | |
2196 | static void __ib_drain_sq(struct ib_qp *qp) | |
2197 | { | |
f039f44f | 2198 | struct ib_cq *cq = qp->send_cq; |
765d6774 SW |
2199 | struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; |
2200 | struct ib_drain_cqe sdrain; | |
a1ae7d03 BVA |
2201 | struct ib_send_wr *bad_swr; |
2202 | struct ib_rdma_wr swr = { | |
2203 | .wr = { | |
6ee68773 AM |
2204 | .next = NULL, |
2205 | { .wr_cqe = &sdrain.cqe, }, | |
a1ae7d03 | 2206 | .opcode = IB_WR_RDMA_WRITE, |
a1ae7d03 BVA |
2207 | }, |
2208 | }; | |
765d6774 SW |
2209 | int ret; |
2210 | ||
765d6774 SW |
2211 | ret = ib_modify_qp(qp, &attr, IB_QP_STATE); |
2212 | if (ret) { | |
2213 | WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); | |
2214 | return; | |
2215 | } | |
2216 | ||
aaebd377 MG |
2217 | sdrain.cqe.done = ib_drain_qp_done; |
2218 | init_completion(&sdrain.done); | |
2219 | ||
a1ae7d03 | 2220 | ret = ib_post_send(qp, &swr.wr, &bad_swr); |
765d6774 SW |
2221 | if (ret) { |
2222 | WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); | |
2223 | return; | |
2224 | } | |
2225 | ||
f039f44f BVA |
2226 | if (cq->poll_ctx == IB_POLL_DIRECT) |
2227 | while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0) | |
2228 | ib_process_cq_direct(cq, -1); | |
2229 | else | |
2230 | wait_for_completion(&sdrain.done); | |
765d6774 SW |
2231 | } |
2232 | ||
2233 | /* | |
2234 | * Post a WR and block until its completion is reaped for the RQ. | |
2235 | */ | |
2236 | static void __ib_drain_rq(struct ib_qp *qp) | |
2237 | { | |
f039f44f | 2238 | struct ib_cq *cq = qp->recv_cq; |
765d6774 SW |
2239 | struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; |
2240 | struct ib_drain_cqe rdrain; | |
2241 | struct ib_recv_wr rwr = {}, *bad_rwr; | |
2242 | int ret; | |
2243 | ||
765d6774 SW |
2244 | ret = ib_modify_qp(qp, &attr, IB_QP_STATE); |
2245 | if (ret) { | |
2246 | WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); | |
2247 | return; | |
2248 | } | |
2249 | ||
aaebd377 MG |
2250 | rwr.wr_cqe = &rdrain.cqe; |
2251 | rdrain.cqe.done = ib_drain_qp_done; | |
2252 | init_completion(&rdrain.done); | |
2253 | ||
765d6774 SW |
2254 | ret = ib_post_recv(qp, &rwr, &bad_rwr); |
2255 | if (ret) { | |
2256 | WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); | |
2257 | return; | |
2258 | } | |
2259 | ||
f039f44f BVA |
2260 | if (cq->poll_ctx == IB_POLL_DIRECT) |
2261 | while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0) | |
2262 | ib_process_cq_direct(cq, -1); | |
2263 | else | |
2264 | wait_for_completion(&rdrain.done); | |
765d6774 SW |
2265 | } |
2266 | ||
2267 | /** | |
2268 | * ib_drain_sq() - Block until all SQ CQEs have been consumed by the | |
2269 | * application. | |
2270 | * @qp: queue pair to drain | |
2271 | * | |
2272 | * If the device has a provider-specific drain function, then | |
2273 | * call that. Otherwise call the generic drain function | |
2274 | * __ib_drain_sq(). | |
2275 | * | |
2276 | * The caller must: | |
2277 | * | |
2278 | * ensure there is room in the CQ and SQ for the drain work request and | |
2279 | * completion. | |
2280 | * | |
f039f44f | 2281 | * allocate the CQ using ib_alloc_cq(). |
765d6774 SW |
2282 | * |
2283 | * ensure that there are no other contexts that are posting WRs concurrently. | |
2284 | * Otherwise the drain is not guaranteed. | |
2285 | */ | |
2286 | void ib_drain_sq(struct ib_qp *qp) | |
2287 | { | |
2288 | if (qp->device->drain_sq) | |
2289 | qp->device->drain_sq(qp); | |
2290 | else | |
2291 | __ib_drain_sq(qp); | |
2292 | } | |
2293 | EXPORT_SYMBOL(ib_drain_sq); | |
2294 | ||
2295 | /** | |
2296 | * ib_drain_rq() - Block until all RQ CQEs have been consumed by the | |
2297 | * application. | |
2298 | * @qp: queue pair to drain | |
2299 | * | |
2300 | * If the device has a provider-specific drain function, then | |
2301 | * call that. Otherwise call the generic drain function | |
2302 | * __ib_drain_rq(). | |
2303 | * | |
2304 | * The caller must: | |
2305 | * | |
2306 | * ensure there is room in the CQ and RQ for the drain work request and | |
2307 | * completion. | |
2308 | * | |
f039f44f | 2309 | * allocate the CQ using ib_alloc_cq(). |
765d6774 SW |
2310 | * |
2311 | * ensure that there are no other contexts that are posting WRs concurrently. | |
2312 | * Otherwise the drain is not guaranteed. | |
2313 | */ | |
2314 | void ib_drain_rq(struct ib_qp *qp) | |
2315 | { | |
2316 | if (qp->device->drain_rq) | |
2317 | qp->device->drain_rq(qp); | |
2318 | else | |
2319 | __ib_drain_rq(qp); | |
2320 | } | |
2321 | EXPORT_SYMBOL(ib_drain_rq); | |
2322 | ||
2323 | /** | |
2324 | * ib_drain_qp() - Block until all CQEs have been consumed by the | |
2325 | * application on both the RQ and SQ. | |
2326 | * @qp: queue pair to drain | |
2327 | * | |
2328 | * The caller must: | |
2329 | * | |
2330 | * ensure there is room in the CQ(s), SQ, and RQ for drain work requests | |
2331 | * and completions. | |
2332 | * | |
f039f44f | 2333 | * allocate the CQs using ib_alloc_cq(). |
765d6774 SW |
2334 | * |
2335 | * ensure that there are no other contexts that are posting WRs concurrently. | |
2336 | * Otherwise the drain is not guaranteed. | |
2337 | */ | |
2338 | void ib_drain_qp(struct ib_qp *qp) | |
2339 | { | |
2340 | ib_drain_sq(qp); | |
42235f80 SG |
2341 | if (!qp->srq) |
2342 | ib_drain_rq(qp); | |
765d6774 SW |
2343 | } |
2344 | EXPORT_SYMBOL(ib_drain_qp); |
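
/*
 * A minimal usage sketch for the drain helpers: draining a QP before
 * destroying it so no completions are reaped after teardown.  It assumes
 * the CQs were allocated with ib_alloc_cq(), as required above.
 */
static void example_teardown_qp(struct ib_qp *qp)
{
	ib_drain_qp(qp);	/* moves the QP to error and empties both queues */
	ib_destroy_qp(qp);
}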