Commit | Line | Data |
---|---|---|
63fa15db | 1 | // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB |
8700e3e7 MS |
2 | /* |
3 | * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. | |
4 | * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. | |
8700e3e7 MS |
5 | */ |
6 | ||
0bbb3b74 | 7 | #include <linux/dma-mapping.h> |
4d6f2859 | 8 | #include <net/addrconf.h> |
89944450 | 9 | #include <rdma/uverbs_ioctl.h> |
0b1fbfb9 | 10 | |
8700e3e7 | 11 | #include "rxe.h" |
8700e3e7 | 12 | #include "rxe_queue.h" |
0b1e5b99 | 13 | #include "rxe_hw_counters.h" |
8700e3e7 | 14 | |
5bf944f2 BP |
15 | static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr); |
16 | ||
17 | /* dev */ | |
18 | static int rxe_query_device(struct ib_device *ibdev, | |
8700e3e7 | 19 | struct ib_device_attr *attr, |
5bf944f2 | 20 | struct ib_udata *udata) |
8700e3e7 | 21 | { |
5bf944f2 BP |
22 | struct rxe_dev *rxe = to_rdev(ibdev); |
23 | int err; | |
24 | ||
25 | if (udata->inlen || udata->outlen) { | |
64827180 | 26 | rxe_dbg_dev(rxe, "malformed udata\n"); |
5bf944f2 BP |
27 | err = -EINVAL; |
28 | goto err_out; | |
29 | } | |
8700e3e7 | 30 | |
5bf944f2 | 31 | memcpy(attr, &rxe->attr, sizeof(*attr)); |
8700e3e7 | 32 | |
8700e3e7 | 33 | return 0; |
5bf944f2 BP |
34 | |
35 | err_out: | |
64827180 | 36 | rxe_err_dev(rxe, "returned err = %d\n", err); |
5bf944f2 | 37 | return err; |
8700e3e7 MS |
38 | } |
39 | ||
5bf944f2 | 40 | static int rxe_query_port(struct ib_device *ibdev, |
1fb7f897 | 41 | u32 port_num, struct ib_port_attr *attr) |
8700e3e7 | 42 | { |
5bf944f2 BP |
43 | struct rxe_dev *rxe = to_rdev(ibdev); |
44 | int err, ret; | |
8700e3e7 | 45 | |
5bf944f2 BP |
46 | if (port_num != 1) { |
47 | err = -EINVAL; | |
64827180 | 48 | rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num); |
5bf944f2 BP |
49 | goto err_out; |
50 | } | |
51 | ||
52 | memcpy(attr, &rxe->port.attr, sizeof(*attr)); | |
8700e3e7 MS |
53 | |
54 | mutex_lock(&rxe->usdev_lock); | |
5bf944f2 BP |
55 | ret = ib_get_eth_speed(ibdev, port_num, &attr->active_speed, |
56 | &attr->active_width); | |
5736c7c4 AB |
57 | |
58 | if (attr->state == IB_PORT_ACTIVE) | |
72a7720f | 59 | attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP; |
5736c7c4 | 60 | else if (dev_get_flags(rxe->ndev) & IFF_UP) |
72a7720f | 61 | attr->phys_state = IB_PORT_PHYS_STATE_POLLING; |
5736c7c4 | 62 | else |
72a7720f | 63 | attr->phys_state = IB_PORT_PHYS_STATE_DISABLED; |
5736c7c4 | 64 | |
8700e3e7 MS |
65 | mutex_unlock(&rxe->usdev_lock); |
66 | ||
5bf944f2 BP |
67 | return ret; |
68 | ||
69 | err_out: | |
64827180 | 70 | rxe_err_dev(rxe, "returned err = %d\n", err); |
5bf944f2 | 71 | return err; |
8700e3e7 MS |
72 | } |
73 | ||
5bf944f2 | 74 | static int rxe_query_pkey(struct ib_device *ibdev, |
1fb7f897 | 75 | u32 port_num, u16 index, u16 *pkey) |
8700e3e7 | 76 | { |
5bf944f2 BP |
77 | struct rxe_dev *rxe = to_rdev(ibdev); |
78 | int err; | |
79 | ||
80 | if (index != 0) { | |
81 | err = -EINVAL; | |
64827180 | 82 | rxe_dbg_dev(rxe, "bad pkey index = %d\n", index); |
5bf944f2 BP |
83 | goto err_out; |
84 | } | |
8700e3e7 | 85 | |
76251e15 | 86 | *pkey = IB_DEFAULT_PKEY_FULL; |
8700e3e7 | 87 | return 0; |
5bf944f2 BP |
88 | |
89 | err_out: | |
64827180 | 90 | rxe_err_dev(rxe, "returned err = %d\n", err); |
5bf944f2 | 91 | return err; |
8700e3e7 MS |
92 | } |
93 | ||
5bf944f2 | 94 | static int rxe_modify_device(struct ib_device *ibdev, |
8700e3e7 MS |
95 | int mask, struct ib_device_modify *attr) |
96 | { | |
5bf944f2 BP |
97 | struct rxe_dev *rxe = to_rdev(ibdev); |
98 | int err; | |
8700e3e7 | 99 | |
f3fceba5 | 100 | if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID | |
5bf944f2 BP |
101 | IB_DEVICE_MODIFY_NODE_DESC)) { |
102 | err = -EOPNOTSUPP; | |
64827180 | 103 | rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask); |
5bf944f2 BP |
104 | goto err_out; |
105 | } | |
f3fceba5 | 106 | |
8700e3e7 MS |
107 | if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) |
108 | rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid); | |
109 | ||
110 | if (mask & IB_DEVICE_MODIFY_NODE_DESC) { | |
111 | memcpy(rxe->ib_dev.node_desc, | |
112 | attr->node_desc, sizeof(rxe->ib_dev.node_desc)); | |
113 | } | |
114 | ||
115 | return 0; | |
5bf944f2 BP |
116 | |
117 | err_out: | |
64827180 | 118 | rxe_err_dev(rxe, "returned err = %d\n", err); |
5bf944f2 | 119 | return err; |
8700e3e7 MS |
120 | } |
121 | ||
5bf944f2 BP |
122 | static int rxe_modify_port(struct ib_device *ibdev, u32 port_num, |
123 | int mask, struct ib_port_modify *attr) | |
8700e3e7 | 124 | { |
5bf944f2 | 125 | struct rxe_dev *rxe = to_rdev(ibdev); |
8700e3e7 | 126 | struct rxe_port *port; |
5bf944f2 | 127 | int err; |
8700e3e7 | 128 | |
5bf944f2 BP |
129 | if (port_num != 1) { |
130 | err = -EINVAL; | |
64827180 | 131 | rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num); |
5bf944f2 BP |
132 | goto err_out; |
133 | } | |
8700e3e7 | 134 | |
5bf944f2 BP |
135 | //TODO is shutdown useful |
136 | if (mask & ~(IB_PORT_RESET_QKEY_CNTR)) { | |
137 | err = -EOPNOTSUPP; | |
64827180 | 138 | rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask); |
5bf944f2 BP |
139 | goto err_out; |
140 | } | |
141 | ||
142 | port = &rxe->port; | |
8700e3e7 MS |
143 | port->attr.port_cap_flags |= attr->set_port_cap_mask; |
144 | port->attr.port_cap_flags &= ~attr->clr_port_cap_mask; | |
145 | ||
146 | if (mask & IB_PORT_RESET_QKEY_CNTR) | |
147 | port->attr.qkey_viol_cntr = 0; | |
148 | ||
149 | return 0; | |
8700e3e7 | 150 | |
5bf944f2 | 151 | err_out: |
64827180 | 152 | rxe_err_dev(rxe, "returned err = %d\n", err); |
5bf944f2 | 153 | return err; |
8700e3e7 MS |
154 | } |
155 | ||
5bf944f2 BP |
156 | static enum rdma_link_layer rxe_get_link_layer(struct ib_device *ibdev, |
157 | u32 port_num) | |
8700e3e7 | 158 | { |
5bf944f2 BP |
159 | struct rxe_dev *rxe = to_rdev(ibdev); |
160 | int err; | |
8700e3e7 | 161 | |
5bf944f2 BP |
162 | if (port_num != 1) { |
163 | err = -EINVAL; | |
64827180 | 164 | rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num); |
5bf944f2 BP |
165 | goto err_out; |
166 | } | |
8700e3e7 | 167 | |
5bf944f2 | 168 | return IB_LINK_LAYER_ETHERNET; |
8700e3e7 | 169 | |
5bf944f2 | 170 | err_out: |
64827180 | 171 | rxe_err_dev(rxe, "returned err = %d\n", err); |
5bf944f2 | 172 | return err; |
8700e3e7 MS |
173 | } |
174 | ||
5bf944f2 | 175 | static int rxe_port_immutable(struct ib_device *ibdev, u32 port_num, |
8700e3e7 MS |
176 | struct ib_port_immutable *immutable) |
177 | { | |
5bf944f2 BP |
178 | struct rxe_dev *rxe = to_rdev(ibdev); |
179 | struct ib_port_attr attr = {}; | |
8700e3e7 | 180 | int err; |
8700e3e7 | 181 | |
5bf944f2 BP |
182 | if (port_num != 1) { |
183 | err = -EINVAL; | |
64827180 | 184 | rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num); |
5bf944f2 BP |
185 | goto err_out; |
186 | } | |
c4550c63 | 187 | |
5bf944f2 | 188 | err = ib_query_port(ibdev, port_num, &attr); |
8700e3e7 | 189 | if (err) |
5bf944f2 | 190 | goto err_out; |
8700e3e7 | 191 | |
5bf944f2 | 192 | immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; |
8700e3e7 MS |
193 | immutable->pkey_tbl_len = attr.pkey_tbl_len; |
194 | immutable->gid_tbl_len = attr.gid_tbl_len; | |
8700e3e7 MS |
195 | immutable->max_mad_size = IB_MGMT_MAD_SIZE; |
196 | ||
197 | return 0; | |
5bf944f2 BP |
198 | |
199 | err_out: | |
64827180 | 200 | rxe_err_dev(rxe, "returned err = %d\n", err); |
5bf944f2 | 201 | return err; |
8700e3e7 MS |
202 | } |
203 | ||
5bf944f2 BP |
204 | /* uc */ |
205 | static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata) | |
206 | { | |
207 | struct rxe_dev *rxe = to_rdev(ibuc->device); | |
208 | struct rxe_ucontext *uc = to_ruc(ibuc); | |
209 | int err; | |
210 | ||
211 | err = rxe_add_to_pool(&rxe->uc_pool, uc); | |
212 | if (err) | |
64827180 | 213 | rxe_err_dev(rxe, "unable to create uc\n"); |
5bf944f2 BP |
214 | |
215 | return err; | |
216 | } | |
217 | ||
/* Tear down a user context; cleanup failures can only be logged since
 * the dealloc path returns void.
 */
static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);
	int err;

	err = rxe_cleanup(uc);
	if (err)
		rxe_err_uc(uc, "cleanup failed, err = %d\n", err);
}
227 | ||
228 | /* pd */ | |
ff23dfa1 | 229 | static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) |
8700e3e7 | 230 | { |
21a428a0 LR |
231 | struct rxe_dev *rxe = to_rdev(ibpd->device); |
232 | struct rxe_pd *pd = to_rpd(ibpd); | |
5bf944f2 BP |
233 | int err; |
234 | ||
235 | err = rxe_add_to_pool(&rxe->pd_pool, pd); | |
236 | if (err) { | |
64827180 | 237 | rxe_dbg_dev(rxe, "unable to alloc pd\n"); |
5bf944f2 BP |
238 | goto err_out; |
239 | } | |
8700e3e7 | 240 | |
5bf944f2 BP |
241 | return 0; |
242 | ||
243 | err_out: | |
64827180 | 244 | rxe_err_dev(rxe, "returned err = %d\n", err); |
5bf944f2 | 245 | return err; |
8700e3e7 MS |
246 | } |
247 | ||
/* Destroy a protection domain. Always reports success to the core;
 * an internal cleanup failure is only logged.
 */
static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_pd *pd = to_rpd(ibpd);
	int err;

	err = rxe_cleanup(pd);
	if (err)
		rxe_err_pd(pd, "cleanup failed, err = %d\n", err);

	return 0;
}
259 | ||
5bf944f2 | 260 | /* ah */ |
fa5d010c MG |
261 | static int rxe_create_ah(struct ib_ah *ibah, |
262 | struct rdma_ah_init_attr *init_attr, | |
263 | struct ib_udata *udata) | |
8700e3e7 | 264 | { |
d3456914 LR |
265 | struct rxe_dev *rxe = to_rdev(ibah->device); |
266 | struct rxe_ah *ah = to_rah(ibah); | |
73a54932 | 267 | struct rxe_create_ah_resp __user *uresp = NULL; |
5bf944f2 | 268 | int err, cleanup_err; |
73a54932 BP |
269 | |
270 | if (udata) { | |
271 | /* test if new user provider */ | |
272 | if (udata->outlen >= sizeof(*uresp)) | |
273 | uresp = udata->outbuf; | |
274 | ah->is_user = true; | |
275 | } else { | |
276 | ah->is_user = false; | |
277 | } | |
8700e3e7 | 278 | |
215d0a75 BP |
279 | err = rxe_add_to_pool_ah(&rxe->ah_pool, ah, |
280 | init_attr->flags & RDMA_CREATE_AH_SLEEPABLE); | |
5bf944f2 | 281 | if (err) { |
64827180 | 282 | rxe_dbg_dev(rxe, "unable to create ah\n"); |
5bf944f2 BP |
283 | goto err_out; |
284 | } | |
8700e3e7 | 285 | |
73a54932 | 286 | /* create index > 0 */ |
02827b67 | 287 | ah->ah_num = ah->elem.index; |
73a54932 | 288 | |
25fd735a BP |
289 | err = rxe_ah_chk_attr(ah, init_attr->ah_attr); |
290 | if (err) { | |
64827180 | 291 | rxe_dbg_ah(ah, "bad attr\n"); |
5bf944f2 | 292 | goto err_cleanup; |
25fd735a BP |
293 | } |
294 | ||
73a54932 BP |
295 | if (uresp) { |
296 | /* only if new user provider */ | |
297 | err = copy_to_user(&uresp->ah_num, &ah->ah_num, | |
298 | sizeof(uresp->ah_num)); | |
299 | if (err) { | |
5bf944f2 | 300 | err = -EFAULT; |
64827180 | 301 | rxe_dbg_ah(ah, "unable to copy to user\n"); |
5bf944f2 | 302 | goto err_cleanup; |
73a54932 BP |
303 | } |
304 | } else if (ah->is_user) { | |
305 | /* only if old user provider */ | |
306 | ah->ah_num = 0; | |
307 | } | |
308 | ||
fa5d010c | 309 | rxe_init_av(init_attr->ah_attr, &ah->av); |
215d0a75 BP |
310 | rxe_finalize(ah); |
311 | ||
d3456914 | 312 | return 0; |
5bf944f2 BP |
313 | |
314 | err_cleanup: | |
315 | cleanup_err = rxe_cleanup(ah); | |
316 | if (cleanup_err) | |
64827180 | 317 | rxe_err_ah(ah, "cleanup failed, err = %d\n", cleanup_err); |
5bf944f2 | 318 | err_out: |
64827180 | 319 | rxe_err_ah(ah, "returned err = %d\n", err); |
5bf944f2 | 320 | return err; |
8700e3e7 MS |
321 | } |
322 | ||
90898850 | 323 | static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr) |
8700e3e7 | 324 | { |
8700e3e7 | 325 | struct rxe_ah *ah = to_rah(ibah); |
5bf944f2 | 326 | int err; |
8700e3e7 | 327 | |
25fd735a | 328 | err = rxe_ah_chk_attr(ah, attr); |
5bf944f2 | 329 | if (err) { |
64827180 | 330 | rxe_dbg_ah(ah, "bad attr\n"); |
5bf944f2 BP |
331 | goto err_out; |
332 | } | |
8700e3e7 | 333 | |
fa407188 | 334 | rxe_init_av(attr, &ah->av); |
5bf944f2 | 335 | |
8700e3e7 | 336 | return 0; |
5bf944f2 BP |
337 | |
338 | err_out: | |
64827180 | 339 | rxe_err_ah(ah, "returned err = %d\n", err); |
5bf944f2 | 340 | return err; |
8700e3e7 MS |
341 | } |
342 | ||
90898850 | 343 | static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr) |
8700e3e7 | 344 | { |
8700e3e7 MS |
345 | struct rxe_ah *ah = to_rah(ibah); |
346 | ||
eca7ddf9 | 347 | memset(attr, 0, sizeof(*attr)); |
44c58487 | 348 | attr->type = ibah->type; |
9c96f3d4 | 349 | rxe_av_to_attr(&ah->av, attr); |
5bf944f2 | 350 | |
8700e3e7 MS |
351 | return 0; |
352 | } | |
353 | ||
9a9ebf8c | 354 | static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags) |
8700e3e7 MS |
355 | { |
356 | struct rxe_ah *ah = to_rah(ibah); | |
5bf944f2 | 357 | int err; |
8700e3e7 | 358 | |
5bf944f2 BP |
359 | err = rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE); |
360 | if (err) | |
64827180 | 361 | rxe_err_ah(ah, "cleanup failed, err = %d\n", err); |
5bcf5a59 | 362 | |
8700e3e7 | 363 | return 0; |
8700e3e7 MS |
364 | } |
365 | ||
5bf944f2 | 366 | /* srq */ |
68e326de LR |
367 | static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init, |
368 | struct ib_udata *udata) | |
8700e3e7 | 369 | { |
68e326de LR |
370 | struct rxe_dev *rxe = to_rdev(ibsrq->device); |
371 | struct rxe_pd *pd = to_rpd(ibsrq->pd); | |
372 | struct rxe_srq *srq = to_rsrq(ibsrq); | |
0c43ab37 | 373 | struct rxe_create_srq_resp __user *uresp = NULL; |
5bf944f2 | 374 | int err, cleanup_err; |
0c43ab37 JG |
375 | |
376 | if (udata) { | |
5bf944f2 BP |
377 | if (udata->outlen < sizeof(*uresp)) { |
378 | err = -EINVAL; | |
64827180 | 379 | rxe_err_dev(rxe, "malformed udata\n"); |
5bf944f2 BP |
380 | goto err_out; |
381 | } | |
0c43ab37 JG |
382 | uresp = udata->outbuf; |
383 | } | |
8700e3e7 | 384 | |
5bf944f2 BP |
385 | if (init->srq_type != IB_SRQT_BASIC) { |
386 | err = -EOPNOTSUPP; | |
64827180 | 387 | rxe_dbg_dev(rxe, "srq type = %d, not supported\n", |
5bf944f2 BP |
388 | init->srq_type); |
389 | goto err_out; | |
390 | } | |
b2a41678 | 391 | |
0b1fbfb9 | 392 | err = rxe_srq_chk_init(rxe, init); |
5bf944f2 | 393 | if (err) { |
64827180 | 394 | rxe_dbg_dev(rxe, "invalid init attributes\n"); |
5bf944f2 BP |
395 | goto err_out; |
396 | } | |
8700e3e7 | 397 | |
91a42c5b | 398 | err = rxe_add_to_pool(&rxe->srq_pool, srq); |
5bf944f2 | 399 | if (err) { |
64827180 | 400 | rxe_dbg_dev(rxe, "unable to create srq, err = %d\n", err); |
5bf944f2 BP |
401 | goto err_out; |
402 | } | |
8700e3e7 | 403 | |
3197706a | 404 | rxe_get(pd); |
8700e3e7 MS |
405 | srq->pd = pd; |
406 | ||
ff23dfa1 | 407 | err = rxe_srq_from_init(rxe, srq, init, udata, uresp); |
5bf944f2 | 408 | if (err) { |
64827180 | 409 | rxe_dbg_srq(srq, "create srq failed, err = %d\n", err); |
215d0a75 | 410 | goto err_cleanup; |
5bf944f2 | 411 | } |
8700e3e7 | 412 | |
68e326de | 413 | return 0; |
8700e3e7 | 414 | |
215d0a75 | 415 | err_cleanup: |
5bf944f2 BP |
416 | cleanup_err = rxe_cleanup(srq); |
417 | if (cleanup_err) | |
64827180 | 418 | rxe_err_srq(srq, "cleanup failed, err = %d\n", cleanup_err); |
5bf944f2 | 419 | err_out: |
64827180 | 420 | rxe_err_dev(rxe, "returned err = %d\n", err); |
68e326de | 421 | return err; |
8700e3e7 MS |
422 | } |
423 | ||
424 | static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | |
425 | enum ib_srq_attr_mask mask, | |
426 | struct ib_udata *udata) | |
427 | { | |
8700e3e7 MS |
428 | struct rxe_srq *srq = to_rsrq(ibsrq); |
429 | struct rxe_dev *rxe = to_rdev(ibsrq->device); | |
5bf944f2 BP |
430 | struct rxe_modify_srq_cmd cmd = {}; |
431 | int err; | |
0c43ab37 JG |
432 | |
433 | if (udata) { | |
5bf944f2 BP |
434 | if (udata->inlen < sizeof(cmd)) { |
435 | err = -EINVAL; | |
64827180 | 436 | rxe_dbg_srq(srq, "malformed udata\n"); |
5bf944f2 BP |
437 | goto err_out; |
438 | } | |
0c43ab37 | 439 | |
5bf944f2 BP |
440 | err = ib_copy_from_udata(&cmd, udata, sizeof(cmd)); |
441 | if (err) { | |
442 | err = -EFAULT; | |
64827180 | 443 | rxe_dbg_srq(srq, "unable to read udata\n"); |
5bf944f2 BP |
444 | goto err_out; |
445 | } | |
0c43ab37 | 446 | } |
8700e3e7 MS |
447 | |
448 | err = rxe_srq_chk_attr(rxe, srq, attr, mask); | |
5bf944f2 | 449 | if (err) { |
64827180 | 450 | rxe_dbg_srq(srq, "bad init attributes\n"); |
5bf944f2 BP |
451 | goto err_out; |
452 | } | |
453 | ||
454 | err = rxe_srq_from_attr(rxe, srq, attr, mask, &cmd, udata); | |
455 | if (err) { | |
64827180 | 456 | rxe_dbg_srq(srq, "bad attr\n"); |
5bf944f2 BP |
457 | goto err_out; |
458 | } | |
459 | ||
460 | return 0; | |
8700e3e7 | 461 | |
5bf944f2 | 462 | err_out: |
64827180 | 463 | rxe_err_srq(srq, "returned err = %d\n", err); |
5bf944f2 | 464 | return err; |
8700e3e7 MS |
465 | } |
466 | ||
467 | static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) | |
468 | { | |
469 | struct rxe_srq *srq = to_rsrq(ibsrq); | |
5bf944f2 | 470 | int err; |
8700e3e7 | 471 | |
5bf944f2 BP |
472 | if (srq->error) { |
473 | err = -EINVAL; | |
64827180 | 474 | rxe_dbg_srq(srq, "srq in error state\n"); |
5bf944f2 BP |
475 | goto err_out; |
476 | } | |
8700e3e7 MS |
477 | |
478 | attr->max_wr = srq->rq.queue->buf->index_mask; | |
479 | attr->max_sge = srq->rq.max_sge; | |
480 | attr->srq_limit = srq->limit; | |
481 | return 0; | |
8700e3e7 | 482 | |
5bf944f2 | 483 | err_out: |
64827180 | 484 | rxe_err_srq(srq, "returned err = %d\n", err); |
5bf944f2 | 485 | return err; |
8700e3e7 MS |
486 | } |
487 | ||
d34ac5cd BVA |
488 | static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, |
489 | const struct ib_recv_wr **bad_wr) | |
8700e3e7 MS |
490 | { |
491 | int err = 0; | |
8700e3e7 | 492 | struct rxe_srq *srq = to_rsrq(ibsrq); |
a099b085 | 493 | unsigned long flags; |
8700e3e7 | 494 | |
a099b085 | 495 | spin_lock_irqsave(&srq->rq.producer_lock, flags); |
8700e3e7 MS |
496 | |
497 | while (wr) { | |
498 | err = post_one_recv(&srq->rq, wr); | |
499 | if (unlikely(err)) | |
500 | break; | |
501 | wr = wr->next; | |
502 | } | |
503 | ||
a099b085 | 504 | spin_unlock_irqrestore(&srq->rq.producer_lock, flags); |
8700e3e7 | 505 | |
5bf944f2 | 506 | if (err) { |
8700e3e7 | 507 | *bad_wr = wr; |
64827180 | 508 | rxe_err_srq(srq, "returned err = %d\n", err); |
5bf944f2 | 509 | } |
8700e3e7 MS |
510 | |
511 | return err; | |
512 | } | |
513 | ||
/* Destroy an SRQ. Always reports success; an internal cleanup
 * failure is only logged.
 */
static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);
	int err;

	err = rxe_cleanup(srq);
	if (err)
		rxe_err_srq(srq, "cleanup failed, err = %d\n", err);

	return 0;
}
525 | ||
526 | /* qp */ | |
514aee66 LR |
527 | static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init, |
528 | struct ib_udata *udata) | |
8700e3e7 | 529 | { |
514aee66 LR |
530 | struct rxe_dev *rxe = to_rdev(ibqp->device); |
531 | struct rxe_pd *pd = to_rpd(ibqp->pd); | |
532 | struct rxe_qp *qp = to_rqp(ibqp); | |
0c43ab37 | 533 | struct rxe_create_qp_resp __user *uresp = NULL; |
5bf944f2 | 534 | int err, cleanup_err; |
0c43ab37 JG |
535 | |
536 | if (udata) { | |
5bf944f2 BP |
537 | if (udata->inlen) { |
538 | err = -EINVAL; | |
64827180 | 539 | rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err); |
5bf944f2 BP |
540 | goto err_out; |
541 | } | |
8700e3e7 | 542 | |
5bf944f2 BP |
543 | if (udata->outlen < sizeof(*uresp)) { |
544 | err = -EINVAL; | |
64827180 | 545 | rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err); |
5bf944f2 BP |
546 | goto err_out; |
547 | } | |
514aee66 | 548 | |
5bcf5a59 | 549 | qp->is_user = true; |
5bf944f2 | 550 | uresp = udata->outbuf; |
5bcf5a59 BP |
551 | } else { |
552 | qp->is_user = false; | |
8700e3e7 MS |
553 | } |
554 | ||
5bf944f2 BP |
555 | if (init->create_flags) { |
556 | err = -EOPNOTSUPP; | |
64827180 | 557 | rxe_dbg_dev(rxe, "unsupported create_flags, err = %d\n", err); |
5bf944f2 BP |
558 | goto err_out; |
559 | } | |
560 | ||
561 | err = rxe_qp_chk_init(rxe, init); | |
562 | if (err) { | |
64827180 | 563 | rxe_dbg_dev(rxe, "bad init attr, err = %d\n", err); |
5bf944f2 BP |
564 | goto err_out; |
565 | } | |
566 | ||
514aee66 | 567 | err = rxe_add_to_pool(&rxe->qp_pool, qp); |
5bf944f2 | 568 | if (err) { |
64827180 | 569 | rxe_dbg_dev(rxe, "unable to create qp, err = %d\n", err); |
5bf944f2 BP |
570 | goto err_out; |
571 | } | |
8700e3e7 | 572 | |
514aee66 | 573 | err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata); |
5bf944f2 | 574 | if (err) { |
64827180 | 575 | rxe_dbg_qp(qp, "create qp failed, err = %d\n", err); |
5bf944f2 BP |
576 | goto err_cleanup; |
577 | } | |
8700e3e7 | 578 | |
215d0a75 | 579 | rxe_finalize(qp); |
514aee66 | 580 | return 0; |
8700e3e7 | 581 | |
5bf944f2 BP |
582 | err_cleanup: |
583 | cleanup_err = rxe_cleanup(qp); | |
584 | if (cleanup_err) | |
64827180 | 585 | rxe_err_qp(qp, "cleanup failed, err = %d\n", cleanup_err); |
5bf944f2 | 586 | err_out: |
64827180 | 587 | rxe_err_dev(rxe, "returned err = %d\n", err); |
514aee66 | 588 | return err; |
8700e3e7 MS |
589 | } |
590 | ||
591 | static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |
592 | int mask, struct ib_udata *udata) | |
593 | { | |
8700e3e7 MS |
594 | struct rxe_dev *rxe = to_rdev(ibqp->device); |
595 | struct rxe_qp *qp = to_rqp(ibqp); | |
5bf944f2 | 596 | int err; |
8700e3e7 | 597 | |
5bf944f2 BP |
598 | if (mask & ~IB_QP_ATTR_STANDARD_BITS) { |
599 | err = -EOPNOTSUPP; | |
64827180 | 600 | rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d\n", |
5bf944f2 BP |
601 | mask, err); |
602 | goto err_out; | |
603 | } | |
26e990ba | 604 | |
8700e3e7 | 605 | err = rxe_qp_chk_attr(rxe, qp, attr, mask); |
5bf944f2 | 606 | if (err) { |
64827180 | 607 | rxe_dbg_qp(qp, "bad mask/attr, err = %d\n", err); |
5bf944f2 BP |
608 | goto err_out; |
609 | } | |
8700e3e7 MS |
610 | |
611 | err = rxe_qp_from_attr(qp, attr, mask, udata); | |
5bf944f2 | 612 | if (err) { |
64827180 | 613 | rxe_dbg_qp(qp, "modify qp failed, err = %d\n", err); |
5bf944f2 BP |
614 | goto err_out; |
615 | } | |
8700e3e7 | 616 | |
104f062f ZY |
617 | if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH)) |
618 | qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label, | |
619 | qp->ibqp.qp_num, | |
620 | qp->attr.dest_qp_num); | |
621 | ||
8700e3e7 | 622 | return 0; |
5bf944f2 BP |
623 | |
624 | err_out: | |
64827180 | 625 | rxe_err_qp(qp, "returned err = %d\n", err); |
5bf944f2 | 626 | return err; |
8700e3e7 MS |
627 | } |
628 | ||
/* Report the QP's init attributes and current attributes. */
static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}
639 | ||
/* Destroy a QP after verifying it is safe to tear down (e.g. no
 * outstanding references that forbid destruction). A cleanup failure
 * after the check passes is only logged.
 */
static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct rxe_qp *qp = to_rqp(ibqp);
	int err;

	err = rxe_qp_chk_destroy(qp);
	if (err) {
		rxe_dbg_qp(qp, "unable to destroy qp, err = %d\n", err);
		goto err_out;
	}

	err = rxe_cleanup(qp);
	if (err)
		rxe_err_qp(qp, "cleanup failed, err = %d\n", err);

	return 0;

err_out:
	rxe_err_qp(qp, "returned err = %d\n", err);
	return err;
}
661 | ||
5bf944f2 | 662 | /* send wr */ |
f605f26e BP |
663 | |
664 | /* sanity check incoming send work request */ | |
f696bf6d | 665 | static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr, |
f605f26e | 666 | unsigned int *maskp, unsigned int *lengthp) |
8700e3e7 MS |
667 | { |
668 | int num_sge = ibwr->num_sge; | |
669 | struct rxe_sq *sq = &qp->sq; | |
f605f26e BP |
670 | unsigned int mask = 0; |
671 | unsigned long length = 0; | |
672 | int err = -EINVAL; | |
673 | int i; | |
8700e3e7 | 674 | |
f605f26e BP |
675 | do { |
676 | mask = wr_opcode_mask(ibwr->opcode, qp); | |
677 | if (!mask) { | |
64827180 | 678 | rxe_err_qp(qp, "bad wr opcode for qp type\n"); |
f605f26e BP |
679 | break; |
680 | } | |
8700e3e7 | 681 | |
f605f26e | 682 | if (num_sge > sq->max_sge) { |
64827180 | 683 | rxe_err_qp(qp, "num_sge > max_sge\n"); |
f605f26e | 684 | break; |
5bf944f2 | 685 | } |
8700e3e7 | 686 | |
f605f26e BP |
687 | length = 0; |
688 | for (i = 0; i < ibwr->num_sge; i++) | |
689 | length += ibwr->sg_list[i].length; | |
690 | ||
691 | if (length > (1UL << 31)) { | |
64827180 | 692 | rxe_err_qp(qp, "message length too long\n"); |
f605f26e | 693 | break; |
5bf944f2 | 694 | } |
8700e3e7 | 695 | |
f605f26e BP |
696 | if (mask & WR_ATOMIC_MASK) { |
697 | if (length != 8) { | |
64827180 | 698 | rxe_err_qp(qp, "atomic length != 8\n"); |
f605f26e BP |
699 | break; |
700 | } | |
701 | if (atomic_wr(ibwr)->remote_addr & 0x7) { | |
64827180 | 702 | rxe_err_qp(qp, "misaligned atomic address\n"); |
f605f26e BP |
703 | break; |
704 | } | |
705 | } | |
706 | if (ibwr->send_flags & IB_SEND_INLINE) { | |
707 | if (!(mask & WR_INLINE_MASK)) { | |
64827180 | 708 | rxe_err_qp(qp, "opcode doesn't support inline data\n"); |
f605f26e BP |
709 | break; |
710 | } | |
711 | if (length > sq->max_inline) { | |
64827180 | 712 | rxe_err_qp(qp, "inline length too big\n"); |
f605f26e BP |
713 | break; |
714 | } | |
715 | } | |
8700e3e7 | 716 | |
f605f26e BP |
717 | err = 0; |
718 | } while (0); | |
5bf944f2 | 719 | |
f605f26e BP |
720 | *maskp = mask; |
721 | *lengthp = (int)length; | |
722 | ||
723 | return err; | |
8700e3e7 MS |
724 | } |
725 | ||
f605f26e | 726 | static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, |
f696bf6d | 727 | const struct ib_send_wr *ibwr) |
8700e3e7 MS |
728 | { |
729 | wr->wr_id = ibwr->wr_id; | |
8700e3e7 MS |
730 | wr->opcode = ibwr->opcode; |
731 | wr->send_flags = ibwr->send_flags; | |
732 | ||
733 | if (qp_type(qp) == IB_QPT_UD || | |
8700e3e7 | 734 | qp_type(qp) == IB_QPT_GSI) { |
3b87e082 BP |
735 | struct ib_ah *ibah = ud_wr(ibwr)->ah; |
736 | ||
8700e3e7 MS |
737 | wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn; |
738 | wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey; | |
3b87e082 | 739 | wr->wr.ud.ah_num = to_rah(ibah)->ah_num; |
8700e3e7 MS |
740 | if (qp_type(qp) == IB_QPT_GSI) |
741 | wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index; | |
f605f26e BP |
742 | |
743 | switch (wr->opcode) { | |
744 | case IB_WR_SEND_WITH_IMM: | |
8700e3e7 | 745 | wr->ex.imm_data = ibwr->ex.imm_data; |
f605f26e BP |
746 | break; |
747 | case IB_WR_SEND: | |
748 | break; | |
749 | default: | |
64827180 | 750 | rxe_err_qp(qp, "bad wr opcode %d for UD/GSI QP\n", |
f605f26e BP |
751 | wr->opcode); |
752 | return -EINVAL; | |
753 | } | |
8700e3e7 MS |
754 | } else { |
755 | switch (wr->opcode) { | |
756 | case IB_WR_RDMA_WRITE_WITH_IMM: | |
757 | wr->ex.imm_data = ibwr->ex.imm_data; | |
df561f66 | 758 | fallthrough; |
8700e3e7 MS |
759 | case IB_WR_RDMA_READ: |
760 | case IB_WR_RDMA_WRITE: | |
761 | wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr; | |
762 | wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey; | |
763 | break; | |
764 | case IB_WR_SEND_WITH_IMM: | |
765 | wr->ex.imm_data = ibwr->ex.imm_data; | |
766 | break; | |
767 | case IB_WR_SEND_WITH_INV: | |
768 | wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey; | |
769 | break; | |
f605f26e BP |
770 | case IB_WR_RDMA_READ_WITH_INV: |
771 | wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey; | |
772 | wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr; | |
773 | wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey; | |
774 | break; | |
8700e3e7 MS |
775 | case IB_WR_ATOMIC_CMP_AND_SWP: |
776 | case IB_WR_ATOMIC_FETCH_AND_ADD: | |
777 | wr->wr.atomic.remote_addr = | |
778 | atomic_wr(ibwr)->remote_addr; | |
779 | wr->wr.atomic.compare_add = | |
780 | atomic_wr(ibwr)->compare_add; | |
781 | wr->wr.atomic.swap = atomic_wr(ibwr)->swap; | |
782 | wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey; | |
783 | break; | |
784 | case IB_WR_LOCAL_INV: | |
785 | wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey; | |
5bf944f2 | 786 | break; |
8700e3e7 MS |
787 | case IB_WR_REG_MR: |
788 | wr->wr.reg.mr = reg_wr(ibwr)->mr; | |
789 | wr->wr.reg.key = reg_wr(ibwr)->key; | |
790 | wr->wr.reg.access = reg_wr(ibwr)->access; | |
5bf944f2 | 791 | break; |
f605f26e BP |
792 | case IB_WR_SEND: |
793 | case IB_WR_BIND_MW: | |
794 | case IB_WR_FLUSH: | |
795 | case IB_WR_ATOMIC_WRITE: | |
796 | break; | |
8700e3e7 | 797 | default: |
64827180 | 798 | rxe_err_qp(qp, "unsupported wr opcode %d\n", |
f605f26e BP |
799 | wr->opcode); |
800 | return -EINVAL; | |
8700e3e7 MS |
801 | } |
802 | } | |
f605f26e BP |
803 | |
804 | return 0; | |
8700e3e7 MS |
805 | } |
806 | ||
086f580c BP |
807 | static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe, |
808 | const struct ib_send_wr *ibwr) | |
809 | { | |
810 | struct ib_sge *sge = ibwr->sg_list; | |
811 | u8 *p = wqe->dma.inline_data; | |
812 | int i; | |
813 | ||
814 | for (i = 0; i < ibwr->num_sge; i++, sge++) { | |
8d7c7c0e | 815 | memcpy(p, ib_virt_dma_to_page(sge->addr), sge->length); |
086f580c BP |
816 | p += sge->length; |
817 | } | |
818 | } | |
819 | ||
f605f26e | 820 | static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr, |
8700e3e7 MS |
821 | unsigned int mask, unsigned int length, |
822 | struct rxe_send_wqe *wqe) | |
823 | { | |
824 | int num_sge = ibwr->num_sge; | |
f605f26e | 825 | int err; |
8700e3e7 | 826 | |
f605f26e BP |
827 | err = init_send_wr(qp, &wqe->wr, ibwr); |
828 | if (err) | |
829 | return err; | |
8700e3e7 | 830 | |
dc78074a | 831 | /* local operation */ |
886441fb | 832 | if (unlikely(mask & WR_LOCAL_OP_MASK)) { |
dc78074a BP |
833 | wqe->mask = mask; |
834 | wqe->state = wqe_state_posted; | |
f605f26e | 835 | return 0; |
dc78074a BP |
836 | } |
837 | ||
086f580c BP |
838 | if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) |
839 | copy_inline_data_to_wqe(wqe, ibwr); | |
840 | else | |
8700e3e7 MS |
841 | memcpy(wqe->dma.sge, ibwr->sg_list, |
842 | num_sge * sizeof(struct ib_sge)); | |
843 | ||
a6544a62 BVA |
844 | wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr : |
845 | mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0; | |
8700e3e7 MS |
846 | wqe->mask = mask; |
847 | wqe->dma.length = length; | |
848 | wqe->dma.resid = length; | |
849 | wqe->dma.num_sge = num_sge; | |
850 | wqe->dma.cur_sge = 0; | |
851 | wqe->dma.sge_offset = 0; | |
852 | wqe->state = wqe_state_posted; | |
853 | wqe->ssn = atomic_add_return(1, &qp->ssn); | |
f605f26e BP |
854 | |
855 | return 0; | |
8700e3e7 MS |
856 | } |
857 | ||
f605f26e | 858 | static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr) |
8700e3e7 MS |
859 | { |
860 | int err; | |
861 | struct rxe_sq *sq = &qp->sq; | |
862 | struct rxe_send_wqe *send_wqe; | |
f605f26e BP |
863 | unsigned int mask; |
864 | unsigned int length; | |
5bcf5a59 | 865 | int full; |
8700e3e7 | 866 | |
f605f26e | 867 | err = validate_send_wr(qp, ibwr, &mask, &length); |
8700e3e7 MS |
868 | if (err) |
869 | return err; | |
870 | ||
a77a5238 | 871 | full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP); |
5bcf5a59 | 872 | if (unlikely(full)) { |
64827180 | 873 | rxe_err_qp(qp, "send queue full\n"); |
5bcf5a59 | 874 | return -ENOMEM; |
8700e3e7 MS |
875 | } |
876 | ||
a77a5238 | 877 | send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_FROM_ULP); |
f605f26e BP |
878 | err = init_send_wqe(qp, ibwr, mask, length, send_wqe); |
879 | if (!err) | |
880 | queue_advance_producer(sq->queue, QUEUE_TYPE_FROM_ULP); | |
8700e3e7 | 881 | |
f605f26e | 882 | return err; |
8700e3e7 MS |
883 | } |
884 | ||
/* Post a chain of send wrs for a kernel ULP queue pair.
 * On error *bad_wr points at the first wr that failed; wrs posted
 * before the failure are still submitted for processing.
 */
static int rxe_post_send_kernel(struct rxe_qp *qp,
				const struct ib_send_wr *ibwr,
				const struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	int good = 0;	/* number of wqes successfully queued */

	spin_lock_irqsave(&qp->sq.sq_lock, flags);
	while (ibwr) {
		err = post_one_send(qp, ibwr);
		if (err) {
			*bad_wr = ibwr;
			break;
		} else {
			good++;
		}
		ibwr = ibwr->next;
	}
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	/* kickoff processing of any posted wqes */
	if (good)
		rxe_sched_task(&qp->send_task);

	return err;
}
912 | ||
/* ib_device_ops.post_send entry point.
 * For user QPs the wqes were already written into the shared queue by
 * the provider library, so we only need to kick the send task; for
 * kernel QPs copy the wr chain into the queue here.
 */
static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);
	int err;
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	/* caller has already called destroy_qp */
	if (WARN_ON_ONCE(!qp->valid)) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		rxe_err_qp(qp, "qp has been destroyed\n");
		return -EINVAL;
	}

	/* sends are only legal in RTS or later states */
	if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		*bad_wr = wr;
		rxe_err_qp(qp, "qp not ready to send\n");
		return -EINVAL;
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_sched_task(&qp->send_task);
	} else {
		err = rxe_post_send_kernel(qp, wr, bad_wr);
		if (err)
			return err;
	}

	return 0;
}
947 | ||
948 | /* recv wr */ | |
949 | static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr) | |
950 | { | |
951 | int i; | |
952 | unsigned long length; | |
953 | struct rxe_recv_wqe *recv_wqe; | |
954 | int num_sge = ibwr->num_sge; | |
955 | int full; | |
956 | int err; | |
957 | ||
958 | full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP); | |
959 | if (unlikely(full)) { | |
960 | err = -ENOMEM; | |
64827180 | 961 | rxe_dbg("queue full\n"); |
5bf944f2 BP |
962 | goto err_out; |
963 | } | |
964 | ||
965 | if (unlikely(num_sge > rq->max_sge)) { | |
966 | err = -EINVAL; | |
64827180 | 967 | rxe_dbg("bad num_sge > max_sge\n"); |
5bf944f2 BP |
968 | goto err_out; |
969 | } | |
970 | ||
971 | length = 0; | |
972 | for (i = 0; i < num_sge; i++) | |
973 | length += ibwr->sg_list[i].length; | |
974 | ||
975 | /* IBA max message size is 2^31 */ | |
976 | if (length >= (1UL<<31)) { | |
977 | err = -EINVAL; | |
64827180 | 978 | rxe_dbg("message length too long\n"); |
5bf944f2 BP |
979 | goto err_out; |
980 | } | |
981 | ||
982 | recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP); | |
983 | ||
984 | recv_wqe->wr_id = ibwr->wr_id; | |
985 | recv_wqe->dma.length = length; | |
986 | recv_wqe->dma.resid = length; | |
987 | recv_wqe->dma.num_sge = num_sge; | |
988 | recv_wqe->dma.cur_sge = 0; | |
989 | recv_wqe->dma.sge_offset = 0; | |
990 | memcpy(recv_wqe->dma.sge, ibwr->sg_list, | |
991 | num_sge * sizeof(struct ib_sge)); | |
992 | ||
993 | queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP); | |
994 | ||
995 | return 0; | |
996 | ||
997 | err_out: | |
64827180 | 998 | rxe_dbg("returned err = %d\n", err); |
5bf944f2 | 999 | return err; |
063af595 PP |
1000 | } |
1001 | ||
/* ib_device_ops.post_recv entry point.
 * Posts a chain of recv wrs onto the qp's own receive queue; rejected if
 * the qp is attached to an srq (use post_srq_recv instead).
 */
static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	/* caller has already called destroy_qp */
	if (WARN_ON_ONCE(!qp->valid)) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		rxe_err_qp(qp, "qp has been destroyed\n");
		return -EINVAL;
	}

	/* see C10-97.2.1 */
	if (unlikely((qp_state(qp) < IB_QPS_INIT))) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		*bad_wr = wr;
		rxe_dbg_qp(qp, "qp not ready to post recv\n");
		return -EINVAL;
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		rxe_dbg_qp(qp, "qp has srq, use post_srq_recv instead\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	/* in the error state posted recv wqes are flushed to the cq;
	 * kick the recv task to consume what was just posted
	 */
	spin_lock_irqsave(&qp->state_lock, flags);
	if (qp_state(qp) == IB_QPS_ERR)
		rxe_sched_task(&qp->recv_task);
	spin_unlock_irqrestore(&qp->state_lock, flags);

	return err;
}
1053 | ||
/* cq */
/* ib_device_ops.create_cq entry point.
 * The ib_cq is allocated by the core (INIT_RDMA_OBJ_SIZE); this adds the
 * rxe_cq to the cq pool and builds the completion queue, returning the
 * queue mmap info to userspace through uresp when udata is present.
 */
static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
			 struct ib_udata *udata)
{
	struct ib_device *dev = ibcq->device;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_create_cq_resp __user *uresp = NULL;
	int err, cleanup_err;

	if (udata) {
		if (udata->outlen < sizeof(*uresp)) {
			err = -EINVAL;
			rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
			goto err_out;
		}
		uresp = udata->outbuf;
	}

	/* no create flags are supported */
	if (attr->flags) {
		err = -EOPNOTSUPP;
		rxe_dbg_dev(rxe, "bad attr->flags, err = %d\n", err);
		goto err_out;
	}

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err) {
		rxe_dbg_dev(rxe, "bad init attributes, err = %d\n", err);
		goto err_out;
	}

	err = rxe_add_to_pool(&rxe->cq_pool, cq);
	if (err) {
		rxe_dbg_dev(rxe, "unable to create cq, err = %d\n", err);
		goto err_out;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
			       uresp);
	if (err) {
		rxe_dbg_cq(cq, "create cq failed, err = %d\n", err);
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	/* undo rxe_add_to_pool; cleanup failure is logged but err_out
	 * still reports the original error to the caller
	 */
	cleanup_err = rxe_cleanup(cq);
	if (cleanup_err)
		rxe_err_cq(cq, "cleanup failed, err = %d\n", cleanup_err);
err_out:
	rxe_err_dev(rxe, "returned err = %d\n", err);
	return err;
}
1108 | ||
1109 | static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) | |
1110 | { | |
8700e3e7 MS |
1111 | struct rxe_cq *cq = to_rcq(ibcq); |
1112 | struct rxe_dev *rxe = to_rdev(ibcq->device); | |
0c43ab37 | 1113 | struct rxe_resize_cq_resp __user *uresp = NULL; |
5bf944f2 | 1114 | int err; |
0c43ab37 JG |
1115 | |
1116 | if (udata) { | |
5bf944f2 BP |
1117 | if (udata->outlen < sizeof(*uresp)) { |
1118 | err = -EINVAL; | |
64827180 | 1119 | rxe_dbg_cq(cq, "malformed udata\n"); |
5bf944f2 BP |
1120 | goto err_out; |
1121 | } | |
0c43ab37 JG |
1122 | uresp = udata->outbuf; |
1123 | } | |
8700e3e7 | 1124 | |
b92ec0fe | 1125 | err = rxe_cq_chk_attr(rxe, cq, cqe, 0); |
5bf944f2 | 1126 | if (err) { |
64827180 | 1127 | rxe_dbg_cq(cq, "bad attr, err = %d\n", err); |
5bf944f2 BP |
1128 | goto err_out; |
1129 | } | |
8700e3e7 | 1130 | |
5bf944f2 BP |
1131 | err = rxe_cq_resize_queue(cq, cqe, uresp, udata); |
1132 | if (err) { | |
64827180 | 1133 | rxe_dbg_cq(cq, "resize cq failed, err = %d\n", err); |
5bf944f2 BP |
1134 | goto err_out; |
1135 | } | |
1136 | ||
1137 | return 0; | |
1138 | ||
1139 | err_out: | |
64827180 | 1140 | rxe_err_cq(cq, "returned err = %d\n", err); |
5bf944f2 | 1141 | return err; |
8700e3e7 MS |
1142 | } |
1143 | ||
1144 | static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) | |
1145 | { | |
1146 | int i; | |
1147 | struct rxe_cq *cq = to_rcq(ibcq); | |
1148 | struct rxe_cqe *cqe; | |
a099b085 | 1149 | unsigned long flags; |
8700e3e7 | 1150 | |
a099b085 | 1151 | spin_lock_irqsave(&cq->cq_lock, flags); |
8700e3e7 | 1152 | for (i = 0; i < num_entries; i++) { |
a77a5238 | 1153 | cqe = queue_head(cq->queue, QUEUE_TYPE_TO_ULP); |
8700e3e7 | 1154 | if (!cqe) |
5bf944f2 | 1155 | break; /* queue empty */ |
8700e3e7 MS |
1156 | |
1157 | memcpy(wc++, &cqe->ibwc, sizeof(*wc)); | |
a77a5238 | 1158 | queue_advance_consumer(cq->queue, QUEUE_TYPE_TO_ULP); |
8700e3e7 | 1159 | } |
a099b085 | 1160 | spin_unlock_irqrestore(&cq->cq_lock, flags); |
8700e3e7 MS |
1161 | |
1162 | return i; | |
1163 | } | |
1164 | ||
1165 | static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt) | |
1166 | { | |
1167 | struct rxe_cq *cq = to_rcq(ibcq); | |
5bcf5a59 BP |
1168 | int count; |
1169 | ||
a77a5238 | 1170 | count = queue_count(cq->queue, QUEUE_TYPE_TO_ULP); |
8700e3e7 MS |
1171 | |
1172 | return (count > wc_cnt) ? wc_cnt : count; | |
1173 | } | |
1174 | ||
1175 | static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) | |
1176 | { | |
1177 | struct rxe_cq *cq = to_rcq(ibcq); | |
accacb8f | 1178 | int ret = 0; |
5bcf5a59 | 1179 | int empty; |
a099b085 | 1180 | unsigned long irq_flags; |
8700e3e7 | 1181 | |
a099b085 | 1182 | spin_lock_irqsave(&cq->cq_lock, irq_flags); |
350b6dd4 | 1183 | cq->notify |= flags & IB_CQ_SOLICITED_MASK; |
a77a5238 | 1184 | empty = queue_empty(cq->queue, QUEUE_TYPE_TO_ULP); |
5bcf5a59 BP |
1185 | |
1186 | if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty) | |
accacb8f AB |
1187 | ret = 1; |
1188 | ||
a099b085 | 1189 | spin_unlock_irqrestore(&cq->cq_lock, irq_flags); |
accacb8f AB |
1190 | |
1191 | return ret; | |
8700e3e7 MS |
1192 | } |
1193 | ||
/* ib_device_ops.destroy_cq entry point.
 * Refuses to destroy a cq still referenced by a work queue; otherwise
 * tears it down, logging (but not propagating) any cleanup failure.
 */
static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int err;

	/* See IBA C11-17: The CI shall return an error if this Verb is
	 * invoked while a Work Queue is still associated with the CQ.
	 */
	if (atomic_read(&cq->num_wq)) {
		err = -EINVAL;
		rxe_dbg_cq(cq, "still in use\n");
		goto err_out;
	}

	err = rxe_cleanup(cq);
	if (err)
		rxe_err_cq(cq, "cleanup failed, err = %d\n", err);

	return 0;

err_out:
	rxe_err_cq(cq, "returned err = %d\n", err);
	return err;
}
1218 | ||
/* mr */
/* ib_device_ops.get_dma_mr entry point.
 * Creates a DMA memory region covering all of kernel memory (no page
 * map); takes a reference on the pd which is dropped at mr cleanup.
 */
static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = rxe_add_to_pool(&rxe->mr_pool, mr);
	if (err) {
		rxe_dbg_dev(rxe, "unable to create mr\n");
		goto err_free;
	}

	rxe_get(pd);
	mr->ibmr.pd = ibpd;
	mr->ibmr.device = ibpd->device;

	rxe_mr_init_dma(access, mr);
	/* make the mr visible for lookup by index */
	rxe_finalize(mr);
	return &mr->ibmr;

err_free:
	kfree(mr);
	rxe_err_pd(pd, "returned err = %d\n", err);
	return ERR_PTR(err);
}
1250 | ||
/* ib_device_ops.reg_user_mr entry point.
 * Registers a user memory region: pins the user pages for
 * [start, start + length) and builds the page map used by the
 * responder/requester DMA paths.
 */
static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
				     u64 length, u64 iova, int access,
				     struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mr *mr;
	int err, cleanup_err;

	/* reject access flags rxe does not implement */
	if (access & ~RXE_ACCESS_SUPPORTED_MR) {
		rxe_err_pd(pd, "access = %#x not supported (%#x)\n", access,
				RXE_ACCESS_SUPPORTED_MR);
		return ERR_PTR(-EOPNOTSUPP);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = rxe_add_to_pool(&rxe->mr_pool, mr);
	if (err) {
		rxe_dbg_pd(pd, "unable to create mr\n");
		goto err_free;
	}

	rxe_get(pd);
	mr->ibmr.pd = ibpd;
	mr->ibmr.device = ibpd->device;

	err = rxe_mr_init_user(rxe, start, length, access, mr);
	if (err) {
		rxe_dbg_mr(mr, "reg_user_mr failed, err = %d\n", err);
		goto err_cleanup;
	}

	/* make the mr visible for lookup by index */
	rxe_finalize(mr);
	return &mr->ibmr;

err_cleanup:
	cleanup_err = rxe_cleanup(mr);
	if (cleanup_err)
		rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);
err_free:
	kfree(mr);
	rxe_err_pd(pd, "returned err = %d\n", err);
	return ERR_PTR(err);
}
1298 | ||
/* ib_device_ops.rereg_user_mr entry point.
 * Returns NULL on success (mr modified in place) per the rereg_user_mr
 * contract; an ERR_PTR for unsupported flag combinations.
 */
static struct ib_mr *rxe_rereg_user_mr(struct ib_mr *ibmr, int flags,
				       u64 start, u64 length, u64 iova,
				       int access, struct ib_pd *ibpd,
				       struct ib_udata *udata)
{
	struct rxe_mr *mr = to_rmr(ibmr);
	struct rxe_pd *old_pd = to_rpd(ibmr->pd);
	struct rxe_pd *pd = to_rpd(ibpd);

	/* for now only support the two easy cases:
	 * rereg_pd and rereg_access
	 */
	if (flags & ~RXE_MR_REREG_SUPPORTED) {
		rxe_err_mr(mr, "flags = %#x not supported\n", flags);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (flags & IB_MR_REREG_PD) {
		/* move the mr's pd reference from the old pd to the new */
		rxe_put(old_pd);
		rxe_get(pd);
		mr->ibmr.pd = ibpd;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		if (access & ~RXE_ACCESS_SUPPORTED_MR) {
			rxe_err_mr(mr, "access = %#x not supported\n", access);
			return ERR_PTR(-EOPNOTSUPP);
		}
		mr->access = access;
	}

	return NULL;
}
1332 | ||
c4367a26 | 1333 | static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, |
42a3b153 | 1334 | u32 max_num_sg) |
8700e3e7 MS |
1335 | { |
1336 | struct rxe_dev *rxe = to_rdev(ibpd->device); | |
1337 | struct rxe_pd *pd = to_rpd(ibpd); | |
364e282c | 1338 | struct rxe_mr *mr; |
5bf944f2 | 1339 | int err, cleanup_err; |
8700e3e7 | 1340 | |
5bf944f2 BP |
1341 | if (mr_type != IB_MR_TYPE_MEM_REG) { |
1342 | err = -EINVAL; | |
64827180 | 1343 | rxe_dbg_pd(pd, "mr type %d not supported, err = %d\n", |
5bf944f2 BP |
1344 | mr_type, err); |
1345 | goto err_out; | |
1346 | } | |
8700e3e7 | 1347 | |
72a03627 | 1348 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); |
b6ba6855 LR |
1349 | if (!mr) |
1350 | return ERR_PTR(-ENOMEM); | |
72a03627 BP |
1351 | |
1352 | err = rxe_add_to_pool(&rxe->mr_pool, mr); | |
b6ba6855 | 1353 | if (err) |
72a03627 | 1354 | goto err_free; |
8700e3e7 | 1355 | |
3197706a | 1356 | rxe_get(pd); |
58651bbb | 1357 | mr->ibmr.pd = ibpd; |
2778b72b | 1358 | mr->ibmr.device = ibpd->device; |
8700e3e7 | 1359 | |
58651bbb | 1360 | err = rxe_mr_init_fast(max_num_sg, mr); |
5bf944f2 | 1361 | if (err) { |
64827180 | 1362 | rxe_dbg_mr(mr, "alloc_mr failed, err = %d\n", err); |
72a03627 | 1363 | goto err_cleanup; |
5bf944f2 | 1364 | } |
8700e3e7 | 1365 | |
215d0a75 | 1366 | rxe_finalize(mr); |
8700e3e7 MS |
1367 | return &mr->ibmr; |
1368 | ||
72a03627 | 1369 | err_cleanup: |
5bf944f2 BP |
1370 | cleanup_err = rxe_cleanup(mr); |
1371 | if (cleanup_err) | |
64827180 | 1372 | rxe_err_mr(mr, "cleanup failed, err = %d\n", err); |
72a03627 BP |
1373 | err_free: |
1374 | kfree(mr); | |
1375 | err_out: | |
64827180 | 1376 | rxe_err_pd(pd, "returned err = %d\n", err); |
8700e3e7 MS |
1377 | return ERR_PTR(err); |
1378 | } | |
1379 | ||
/* ib_device_ops.dereg_mr entry point.
 * Refuses to deregister an mr with memory windows still bound to it;
 * otherwise cleans it up and frees it after an RCU grace period since
 * lookups may still hold an RCU-protected reference.
 */
static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct rxe_mr *mr = to_rmr(ibmr);
	int err, cleanup_err;

	/* See IBA 10.6.7.2.6 */
	if (atomic_read(&mr->num_mw) > 0) {
		err = -EINVAL;
		rxe_dbg_mr(mr, "mr has mw's bound\n");
		goto err_out;
	}

	/* cleanup failure is logged but the dereg still succeeds */
	cleanup_err = rxe_cleanup(mr);
	if (cleanup_err)
		rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);

	kfree_rcu_mightsleep(mr);
	return 0;

err_out:
	rxe_err_mr(mr, "returned err = %d\n", err);
	return err;
}
1403 | ||
c05d2664 KH |
1404 | static ssize_t parent_show(struct device *device, |
1405 | struct device_attribute *attr, char *buf) | |
8700e3e7 | 1406 | { |
54747231 PP |
1407 | struct rxe_dev *rxe = |
1408 | rdma_device_to_drv_device(device, struct rxe_dev, ib_dev); | |
8700e3e7 | 1409 | |
1c7fd726 | 1410 | return sysfs_emit(buf, "%s\n", rxe_parent_name(rxe, 1)); |
8700e3e7 MS |
1411 | } |
1412 | ||
static DEVICE_ATTR_RO(parent);

/* sysfs attributes exposed under the ib device; registered via
 * rxe_dev_ops.device_group
 */
static struct attribute *rxe_dev_attributes[] = {
	&dev_attr_parent.attr,
	NULL
};

static const struct attribute_group rxe_attr_group = {
	.attrs = rxe_dev_attributes,
};
1423 | ||
/* ib_device_ops.enable_driver hook, called by the core after device
 * registration; brings the single port up to match the netdev state.
 */
static int rxe_enable_driver(struct ib_device *ib_dev)
{
	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

	rxe_set_port_state(rxe);
	dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
	return 0;
}
1432 | ||
/* verb dispatch table registered with the RDMA core; the
 * INIT_RDMA_OBJ_SIZE entries let the core allocate the driver structs
 * that embed the corresponding ib_* objects
 */
static const struct ib_device_ops rxe_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_RXE,
	.uverbs_abi_ver = RXE_UVERBS_ABI_VERSION,

	.alloc_hw_port_stats = rxe_ib_alloc_hw_port_stats,
	.alloc_mr = rxe_alloc_mr,
	.alloc_mw = rxe_alloc_mw,
	.alloc_pd = rxe_alloc_pd,
	.alloc_ucontext = rxe_alloc_ucontext,
	.attach_mcast = rxe_attach_mcast,
	.create_ah = rxe_create_ah,
	.create_cq = rxe_create_cq,
	.create_qp = rxe_create_qp,
	.create_srq = rxe_create_srq,
	.create_user_ah = rxe_create_ah,
	.dealloc_driver = rxe_dealloc,
	.dealloc_mw = rxe_dealloc_mw,
	.dealloc_pd = rxe_dealloc_pd,
	.dealloc_ucontext = rxe_dealloc_ucontext,
	.dereg_mr = rxe_dereg_mr,
	.destroy_ah = rxe_destroy_ah,
	.destroy_cq = rxe_destroy_cq,
	.destroy_qp = rxe_destroy_qp,
	.destroy_srq = rxe_destroy_srq,
	.detach_mcast = rxe_detach_mcast,
	.device_group = &rxe_attr_group,
	.enable_driver = rxe_enable_driver,
	.get_dma_mr = rxe_get_dma_mr,
	.get_hw_stats = rxe_ib_get_hw_stats,
	.get_link_layer = rxe_get_link_layer,
	.get_port_immutable = rxe_port_immutable,
	.map_mr_sg = rxe_map_mr_sg,
	.mmap = rxe_mmap,
	.modify_ah = rxe_modify_ah,
	.modify_device = rxe_modify_device,
	.modify_port = rxe_modify_port,
	.modify_qp = rxe_modify_qp,
	.modify_srq = rxe_modify_srq,
	.peek_cq = rxe_peek_cq,
	.poll_cq = rxe_poll_cq,
	.post_recv = rxe_post_recv,
	.post_send = rxe_post_send,
	.post_srq_recv = rxe_post_srq_recv,
	.query_ah = rxe_query_ah,
	.query_device = rxe_query_device,
	.query_pkey = rxe_query_pkey,
	.query_port = rxe_query_port,
	.query_qp = rxe_query_qp,
	.query_srq = rxe_query_srq,
	.reg_user_mr = rxe_reg_user_mr,
	.req_notify_cq = rxe_req_notify_cq,
	.rereg_user_mr = rxe_rereg_user_mr,
	.resize_cq = rxe_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, rxe_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
	INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
};
1496 | ||
/* Fill in the ib_device fields for a new rxe device and register it
 * with the RDMA core under ibdev_name. Returns 0 on success or a
 * negative errno.
 */
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
{
	int err;
	struct ib_device *dev = &rxe->ib_dev;

	strscpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->local_dma_lkey = 0;
	/* derive the 64-bit node guid from the netdev's MAC address */
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);

	dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
				BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);

	ib_set_device_ops(dev, &rxe_dev_ops);
	err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
	if (err)
		return err;

	/* set up ICRC computation before the device can carry traffic */
	err = rxe_icrc_init(rxe);
	if (err)
		return err;

	err = ib_register_device(dev, ibdev_name, NULL);
	if (err)
		rxe_dbg_dev(rxe, "failed with error %d\n", err);

	/*
	 * Note that rxe may be invalid at this point if another thread
	 * unregistered it.
	 */
	return err;
}