IB/core: Ethernet L2 attributes in verbs/cm structures
[linux-2.6-block.git] / drivers / infiniband / core / verbs.c
CommitLineData
1da177e4
LT
1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
2a1d9b7f 7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
33b9b3ee 8 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
1da177e4
LT
9 *
10 * This software is available to you under a choice of one of two
11 * licenses. You may choose to be licensed under the terms of the GNU
12 * General Public License (GPL) Version 2, available from the file
13 * COPYING in the main directory of this source tree, or the
14 * OpenIB.org BSD license below:
15 *
16 * Redistribution and use in source and binary forms, with or
17 * without modification, are permitted provided that the following
18 * conditions are met:
19 *
20 * - Redistributions of source code must retain the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer.
23 *
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials
27 * provided with the distribution.
28 *
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 * SOFTWARE.
1da177e4
LT
37 */
38
39#include <linux/errno.h>
40#include <linux/err.h>
b108d976 41#include <linux/export.h>
8c65b4a6 42#include <linux/string.h>
0e0ec7e0 43#include <linux/slab.h>
1da177e4 44
a4d61e84
RD
45#include <rdma/ib_verbs.h>
46#include <rdma/ib_cache.h>
dd5f03be 47#include <rdma/ib_addr.h>
1da177e4 48
bf6a9e31
JM
49int ib_rate_to_mult(enum ib_rate rate)
50{
51 switch (rate) {
52 case IB_RATE_2_5_GBPS: return 1;
53 case IB_RATE_5_GBPS: return 2;
54 case IB_RATE_10_GBPS: return 4;
55 case IB_RATE_20_GBPS: return 8;
56 case IB_RATE_30_GBPS: return 12;
57 case IB_RATE_40_GBPS: return 16;
58 case IB_RATE_60_GBPS: return 24;
59 case IB_RATE_80_GBPS: return 32;
60 case IB_RATE_120_GBPS: return 48;
61 default: return -1;
62 }
63}
64EXPORT_SYMBOL(ib_rate_to_mult);
65
66enum ib_rate mult_to_ib_rate(int mult)
67{
68 switch (mult) {
69 case 1: return IB_RATE_2_5_GBPS;
70 case 2: return IB_RATE_5_GBPS;
71 case 4: return IB_RATE_10_GBPS;
72 case 8: return IB_RATE_20_GBPS;
73 case 12: return IB_RATE_30_GBPS;
74 case 16: return IB_RATE_40_GBPS;
75 case 24: return IB_RATE_60_GBPS;
76 case 32: return IB_RATE_80_GBPS;
77 case 48: return IB_RATE_120_GBPS;
78 default: return IB_RATE_PORT_CURRENT;
79 }
80}
81EXPORT_SYMBOL(mult_to_ib_rate);
82
71eeba16
MA
83int ib_rate_to_mbps(enum ib_rate rate)
84{
85 switch (rate) {
86 case IB_RATE_2_5_GBPS: return 2500;
87 case IB_RATE_5_GBPS: return 5000;
88 case IB_RATE_10_GBPS: return 10000;
89 case IB_RATE_20_GBPS: return 20000;
90 case IB_RATE_30_GBPS: return 30000;
91 case IB_RATE_40_GBPS: return 40000;
92 case IB_RATE_60_GBPS: return 60000;
93 case IB_RATE_80_GBPS: return 80000;
94 case IB_RATE_120_GBPS: return 120000;
95 case IB_RATE_14_GBPS: return 14062;
96 case IB_RATE_56_GBPS: return 56250;
97 case IB_RATE_112_GBPS: return 112500;
98 case IB_RATE_168_GBPS: return 168750;
99 case IB_RATE_25_GBPS: return 25781;
100 case IB_RATE_100_GBPS: return 103125;
101 case IB_RATE_200_GBPS: return 206250;
102 case IB_RATE_300_GBPS: return 309375;
103 default: return -1;
104 }
105}
106EXPORT_SYMBOL(ib_rate_to_mbps);
107
07ebafba
TT
108enum rdma_transport_type
109rdma_node_get_transport(enum rdma_node_type node_type)
110{
111 switch (node_type) {
112 case RDMA_NODE_IB_CA:
113 case RDMA_NODE_IB_SWITCH:
114 case RDMA_NODE_IB_ROUTER:
115 return RDMA_TRANSPORT_IB;
116 case RDMA_NODE_RNIC:
117 return RDMA_TRANSPORT_IWARP;
180771a3
UM
118 case RDMA_NODE_USNIC:
119 return RDMA_TRANSPORT_USNIC;
07ebafba
TT
120 default:
121 BUG();
122 return 0;
123 }
124}
125EXPORT_SYMBOL(rdma_node_get_transport);
126
a3f5adaf
EC
127enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
128{
129 if (device->get_link_layer)
130 return device->get_link_layer(device, port_num);
131
132 switch (rdma_node_get_transport(device->node_type)) {
133 case RDMA_TRANSPORT_IB:
134 return IB_LINK_LAYER_INFINIBAND;
135 case RDMA_TRANSPORT_IWARP:
180771a3 136 case RDMA_TRANSPORT_USNIC:
a3f5adaf
EC
137 return IB_LINK_LAYER_ETHERNET;
138 default:
139 return IB_LINK_LAYER_UNSPECIFIED;
140 }
141}
142EXPORT_SYMBOL(rdma_port_get_link_layer);
143
1da177e4
LT
144/* Protection domains */
145
146struct ib_pd *ib_alloc_pd(struct ib_device *device)
147{
148 struct ib_pd *pd;
149
b5e81bf5 150 pd = device->alloc_pd(device, NULL, NULL);
1da177e4
LT
151
152 if (!IS_ERR(pd)) {
b5e81bf5
RD
153 pd->device = device;
154 pd->uobject = NULL;
1da177e4
LT
155 atomic_set(&pd->usecnt, 0);
156 }
157
158 return pd;
159}
160EXPORT_SYMBOL(ib_alloc_pd);
161
162int ib_dealloc_pd(struct ib_pd *pd)
163{
164 if (atomic_read(&pd->usecnt))
165 return -EBUSY;
166
167 return pd->device->dealloc_pd(pd);
168}
169EXPORT_SYMBOL(ib_dealloc_pd);
170
171/* Address handles */
172
173struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
174{
175 struct ib_ah *ah;
176
177 ah = pd->device->create_ah(pd, ah_attr);
178
179 if (!IS_ERR(ah)) {
b5e81bf5
RD
180 ah->device = pd->device;
181 ah->pd = pd;
182 ah->uobject = NULL;
1da177e4
LT
183 atomic_inc(&pd->usecnt);
184 }
185
186 return ah;
187}
188EXPORT_SYMBOL(ib_create_ah);
189
4e00d694
SH
190int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
191 struct ib_grh *grh, struct ib_ah_attr *ah_attr)
513789ed 192{
513789ed
HR
193 u32 flow_class;
194 u16 gid_index;
195 int ret;
dd5f03be
MB
196 int is_eth = (rdma_port_get_link_layer(device, port_num) ==
197 IB_LINK_LAYER_ETHERNET);
513789ed 198
4e00d694 199 memset(ah_attr, 0, sizeof *ah_attr);
dd5f03be
MB
200 if (is_eth) {
201 if (!(wc->wc_flags & IB_WC_GRH))
202 return -EPROTOTYPE;
203
204 if (wc->wc_flags & IB_WC_WITH_SMAC &&
205 wc->wc_flags & IB_WC_WITH_VLAN) {
206 memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
207 ah_attr->vlan_id = wc->vlan_id;
208 } else {
209 ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
210 ah_attr->dmac, &ah_attr->vlan_id);
211 if (ret)
212 return ret;
213 }
214 } else {
215 ah_attr->vlan_id = 0xffff;
216 }
217
4e00d694
SH
218 ah_attr->dlid = wc->slid;
219 ah_attr->sl = wc->sl;
220 ah_attr->src_path_bits = wc->dlid_path_bits;
221 ah_attr->port_num = port_num;
513789ed
HR
222
223 if (wc->wc_flags & IB_WC_GRH) {
4e00d694
SH
224 ah_attr->ah_flags = IB_AH_GRH;
225 ah_attr->grh.dgid = grh->sgid;
513789ed 226
4e00d694 227 ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
513789ed
HR
228 &gid_index);
229 if (ret)
4e00d694 230 return ret;
513789ed 231
4e00d694 232 ah_attr->grh.sgid_index = (u8) gid_index;
497677ab 233 flow_class = be32_to_cpu(grh->version_tclass_flow);
4e00d694 234 ah_attr->grh.flow_label = flow_class & 0xFFFFF;
47645d8d 235 ah_attr->grh.hop_limit = 0xFF;
4e00d694 236 ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
513789ed 237 }
4e00d694
SH
238 return 0;
239}
240EXPORT_SYMBOL(ib_init_ah_from_wc);
241
242struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
243 struct ib_grh *grh, u8 port_num)
244{
245 struct ib_ah_attr ah_attr;
246 int ret;
247
248 ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
249 if (ret)
250 return ERR_PTR(ret);
513789ed
HR
251
252 return ib_create_ah(pd, &ah_attr);
253}
254EXPORT_SYMBOL(ib_create_ah_from_wc);
255
1da177e4
LT
256int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
257{
258 return ah->device->modify_ah ?
259 ah->device->modify_ah(ah, ah_attr) :
260 -ENOSYS;
261}
262EXPORT_SYMBOL(ib_modify_ah);
263
264int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
265{
266 return ah->device->query_ah ?
267 ah->device->query_ah(ah, ah_attr) :
268 -ENOSYS;
269}
270EXPORT_SYMBOL(ib_query_ah);
271
272int ib_destroy_ah(struct ib_ah *ah)
273{
274 struct ib_pd *pd;
275 int ret;
276
277 pd = ah->pd;
278 ret = ah->device->destroy_ah(ah);
279 if (!ret)
280 atomic_dec(&pd->usecnt);
281
282 return ret;
283}
284EXPORT_SYMBOL(ib_destroy_ah);
285
d41fcc67
RD
286/* Shared receive queues */
287
288struct ib_srq *ib_create_srq(struct ib_pd *pd,
289 struct ib_srq_init_attr *srq_init_attr)
290{
291 struct ib_srq *srq;
292
293 if (!pd->device->create_srq)
294 return ERR_PTR(-ENOSYS);
295
296 srq = pd->device->create_srq(pd, srq_init_attr, NULL);
297
298 if (!IS_ERR(srq)) {
299 srq->device = pd->device;
300 srq->pd = pd;
301 srq->uobject = NULL;
302 srq->event_handler = srq_init_attr->event_handler;
303 srq->srq_context = srq_init_attr->srq_context;
96104eda 304 srq->srq_type = srq_init_attr->srq_type;
418d5130
SH
305 if (srq->srq_type == IB_SRQT_XRC) {
306 srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
307 srq->ext.xrc.cq = srq_init_attr->ext.xrc.cq;
308 atomic_inc(&srq->ext.xrc.xrcd->usecnt);
309 atomic_inc(&srq->ext.xrc.cq->usecnt);
310 }
d41fcc67
RD
311 atomic_inc(&pd->usecnt);
312 atomic_set(&srq->usecnt, 0);
313 }
314
315 return srq;
316}
317EXPORT_SYMBOL(ib_create_srq);
318
319int ib_modify_srq(struct ib_srq *srq,
320 struct ib_srq_attr *srq_attr,
321 enum ib_srq_attr_mask srq_attr_mask)
322{
7ce5eacb
DB
323 return srq->device->modify_srq ?
324 srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
325 -ENOSYS;
d41fcc67
RD
326}
327EXPORT_SYMBOL(ib_modify_srq);
328
329int ib_query_srq(struct ib_srq *srq,
330 struct ib_srq_attr *srq_attr)
331{
332 return srq->device->query_srq ?
333 srq->device->query_srq(srq, srq_attr) : -ENOSYS;
334}
335EXPORT_SYMBOL(ib_query_srq);
336
337int ib_destroy_srq(struct ib_srq *srq)
338{
339 struct ib_pd *pd;
418d5130
SH
340 enum ib_srq_type srq_type;
341 struct ib_xrcd *uninitialized_var(xrcd);
342 struct ib_cq *uninitialized_var(cq);
d41fcc67
RD
343 int ret;
344
345 if (atomic_read(&srq->usecnt))
346 return -EBUSY;
347
348 pd = srq->pd;
418d5130
SH
349 srq_type = srq->srq_type;
350 if (srq_type == IB_SRQT_XRC) {
351 xrcd = srq->ext.xrc.xrcd;
352 cq = srq->ext.xrc.cq;
353 }
d41fcc67
RD
354
355 ret = srq->device->destroy_srq(srq);
418d5130 356 if (!ret) {
d41fcc67 357 atomic_dec(&pd->usecnt);
418d5130
SH
358 if (srq_type == IB_SRQT_XRC) {
359 atomic_dec(&xrcd->usecnt);
360 atomic_dec(&cq->usecnt);
361 }
362 }
d41fcc67
RD
363
364 return ret;
365}
366EXPORT_SYMBOL(ib_destroy_srq);
367
1da177e4
LT
368/* Queue pairs */
369
0e0ec7e0
SH
370static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
371{
372 struct ib_qp *qp = context;
73c40c61 373 unsigned long flags;
0e0ec7e0 374
73c40c61 375 spin_lock_irqsave(&qp->device->event_handler_lock, flags);
0e0ec7e0 376 list_for_each_entry(event->element.qp, &qp->open_list, open_list)
eec9e29f
SP
377 if (event->element.qp->event_handler)
378 event->element.qp->event_handler(event, event->element.qp->qp_context);
73c40c61 379 spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
0e0ec7e0
SH
380}
381
d3d72d90
SH
382static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
383{
384 mutex_lock(&xrcd->tgt_qp_mutex);
385 list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
386 mutex_unlock(&xrcd->tgt_qp_mutex);
387}
388
0e0ec7e0
SH
389static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
390 void (*event_handler)(struct ib_event *, void *),
391 void *qp_context)
d3d72d90 392{
0e0ec7e0
SH
393 struct ib_qp *qp;
394 unsigned long flags;
395
396 qp = kzalloc(sizeof *qp, GFP_KERNEL);
397 if (!qp)
398 return ERR_PTR(-ENOMEM);
399
400 qp->real_qp = real_qp;
401 atomic_inc(&real_qp->usecnt);
402 qp->device = real_qp->device;
403 qp->event_handler = event_handler;
404 qp->qp_context = qp_context;
405 qp->qp_num = real_qp->qp_num;
406 qp->qp_type = real_qp->qp_type;
407
408 spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
409 list_add(&qp->open_list, &real_qp->open_list);
410 spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
411
412 return qp;
413}
414
415struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
416 struct ib_qp_open_attr *qp_open_attr)
417{
418 struct ib_qp *qp, *real_qp;
419
420 if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
421 return ERR_PTR(-EINVAL);
422
423 qp = ERR_PTR(-EINVAL);
d3d72d90 424 mutex_lock(&xrcd->tgt_qp_mutex);
0e0ec7e0
SH
425 list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
426 if (real_qp->qp_num == qp_open_attr->qp_num) {
427 qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
428 qp_open_attr->qp_context);
429 break;
430 }
431 }
d3d72d90 432 mutex_unlock(&xrcd->tgt_qp_mutex);
0e0ec7e0 433 return qp;
d3d72d90 434}
0e0ec7e0 435EXPORT_SYMBOL(ib_open_qp);
d3d72d90 436
1da177e4
LT
437struct ib_qp *ib_create_qp(struct ib_pd *pd,
438 struct ib_qp_init_attr *qp_init_attr)
439{
0e0ec7e0 440 struct ib_qp *qp, *real_qp;
b42b63cf 441 struct ib_device *device;
1da177e4 442
b42b63cf
SH
443 device = pd ? pd->device : qp_init_attr->xrcd->device;
444 qp = device->create_qp(pd, qp_init_attr, NULL);
1da177e4
LT
445
446 if (!IS_ERR(qp)) {
0e0ec7e0
SH
447 qp->device = device;
448 qp->real_qp = qp;
449 qp->uobject = NULL;
450 qp->qp_type = qp_init_attr->qp_type;
b42b63cf 451
e47e321a 452 atomic_set(&qp->usecnt, 0);
b42b63cf 453 if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
0e0ec7e0
SH
454 qp->event_handler = __ib_shared_qp_event_handler;
455 qp->qp_context = qp;
b42b63cf
SH
456 qp->pd = NULL;
457 qp->send_cq = qp->recv_cq = NULL;
458 qp->srq = NULL;
459 qp->xrcd = qp_init_attr->xrcd;
460 atomic_inc(&qp_init_attr->xrcd->usecnt);
0e0ec7e0 461 INIT_LIST_HEAD(&qp->open_list);
0e0ec7e0
SH
462
463 real_qp = qp;
464 qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
465 qp_init_attr->qp_context);
466 if (!IS_ERR(qp))
467 __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
468 else
469 real_qp->device->destroy_qp(real_qp);
b42b63cf 470 } else {
0e0ec7e0
SH
471 qp->event_handler = qp_init_attr->event_handler;
472 qp->qp_context = qp_init_attr->qp_context;
b42b63cf
SH
473 if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
474 qp->recv_cq = NULL;
475 qp->srq = NULL;
476 } else {
477 qp->recv_cq = qp_init_attr->recv_cq;
478 atomic_inc(&qp_init_attr->recv_cq->usecnt);
479 qp->srq = qp_init_attr->srq;
480 if (qp->srq)
481 atomic_inc(&qp_init_attr->srq->usecnt);
482 }
483
484 qp->pd = pd;
485 qp->send_cq = qp_init_attr->send_cq;
486 qp->xrcd = NULL;
487
488 atomic_inc(&pd->usecnt);
489 atomic_inc(&qp_init_attr->send_cq->usecnt);
490 }
1da177e4
LT
491 }
492
493 return qp;
494}
495EXPORT_SYMBOL(ib_create_qp);
496
8a51866f
RD
497static const struct {
498 int valid;
b42b63cf 499 enum ib_qp_attr_mask req_param[IB_QPT_MAX];
dd5f03be 500 enum ib_qp_attr_mask req_param_add_eth[IB_QPT_MAX];
b42b63cf 501 enum ib_qp_attr_mask opt_param[IB_QPT_MAX];
dd5f03be 502 enum ib_qp_attr_mask opt_param_add_eth[IB_QPT_MAX];
8a51866f
RD
503} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
504 [IB_QPS_RESET] = {
505 [IB_QPS_RESET] = { .valid = 1 },
8a51866f
RD
506 [IB_QPS_INIT] = {
507 .valid = 1,
508 .req_param = {
509 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
510 IB_QP_PORT |
511 IB_QP_QKEY),
c938a616 512 [IB_QPT_RAW_PACKET] = IB_QP_PORT,
8a51866f
RD
513 [IB_QPT_UC] = (IB_QP_PKEY_INDEX |
514 IB_QP_PORT |
515 IB_QP_ACCESS_FLAGS),
516 [IB_QPT_RC] = (IB_QP_PKEY_INDEX |
517 IB_QP_PORT |
518 IB_QP_ACCESS_FLAGS),
b42b63cf
SH
519 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
520 IB_QP_PORT |
521 IB_QP_ACCESS_FLAGS),
522 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
523 IB_QP_PORT |
524 IB_QP_ACCESS_FLAGS),
8a51866f
RD
525 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
526 IB_QP_QKEY),
527 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
528 IB_QP_QKEY),
529 }
530 },
531 },
532 [IB_QPS_INIT] = {
533 [IB_QPS_RESET] = { .valid = 1 },
534 [IB_QPS_ERR] = { .valid = 1 },
535 [IB_QPS_INIT] = {
536 .valid = 1,
537 .opt_param = {
538 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
539 IB_QP_PORT |
540 IB_QP_QKEY),
541 [IB_QPT_UC] = (IB_QP_PKEY_INDEX |
542 IB_QP_PORT |
543 IB_QP_ACCESS_FLAGS),
544 [IB_QPT_RC] = (IB_QP_PKEY_INDEX |
545 IB_QP_PORT |
546 IB_QP_ACCESS_FLAGS),
b42b63cf
SH
547 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
548 IB_QP_PORT |
549 IB_QP_ACCESS_FLAGS),
550 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
551 IB_QP_PORT |
552 IB_QP_ACCESS_FLAGS),
8a51866f
RD
553 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
554 IB_QP_QKEY),
555 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
556 IB_QP_QKEY),
557 }
558 },
559 [IB_QPS_RTR] = {
560 .valid = 1,
561 .req_param = {
562 [IB_QPT_UC] = (IB_QP_AV |
563 IB_QP_PATH_MTU |
564 IB_QP_DEST_QPN |
565 IB_QP_RQ_PSN),
566 [IB_QPT_RC] = (IB_QP_AV |
567 IB_QP_PATH_MTU |
568 IB_QP_DEST_QPN |
569 IB_QP_RQ_PSN |
570 IB_QP_MAX_DEST_RD_ATOMIC |
571 IB_QP_MIN_RNR_TIMER),
b42b63cf
SH
572 [IB_QPT_XRC_INI] = (IB_QP_AV |
573 IB_QP_PATH_MTU |
574 IB_QP_DEST_QPN |
575 IB_QP_RQ_PSN),
576 [IB_QPT_XRC_TGT] = (IB_QP_AV |
577 IB_QP_PATH_MTU |
578 IB_QP_DEST_QPN |
579 IB_QP_RQ_PSN |
580 IB_QP_MAX_DEST_RD_ATOMIC |
581 IB_QP_MIN_RNR_TIMER),
8a51866f 582 },
dd5f03be
MB
583 .req_param_add_eth = {
584 [IB_QPT_RC] = (IB_QP_SMAC),
585 [IB_QPT_UC] = (IB_QP_SMAC),
586 [IB_QPT_XRC_INI] = (IB_QP_SMAC),
587 [IB_QPT_XRC_TGT] = (IB_QP_SMAC)
588 },
8a51866f
RD
589 .opt_param = {
590 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
591 IB_QP_QKEY),
592 [IB_QPT_UC] = (IB_QP_ALT_PATH |
593 IB_QP_ACCESS_FLAGS |
594 IB_QP_PKEY_INDEX),
595 [IB_QPT_RC] = (IB_QP_ALT_PATH |
596 IB_QP_ACCESS_FLAGS |
597 IB_QP_PKEY_INDEX),
b42b63cf
SH
598 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
599 IB_QP_ACCESS_FLAGS |
600 IB_QP_PKEY_INDEX),
601 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
602 IB_QP_ACCESS_FLAGS |
603 IB_QP_PKEY_INDEX),
8a51866f
RD
604 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
605 IB_QP_QKEY),
606 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
607 IB_QP_QKEY),
dd5f03be
MB
608 },
609 .opt_param_add_eth = {
610 [IB_QPT_RC] = (IB_QP_ALT_SMAC |
611 IB_QP_VID |
612 IB_QP_ALT_VID),
613 [IB_QPT_UC] = (IB_QP_ALT_SMAC |
614 IB_QP_VID |
615 IB_QP_ALT_VID),
616 [IB_QPT_XRC_INI] = (IB_QP_ALT_SMAC |
617 IB_QP_VID |
618 IB_QP_ALT_VID),
619 [IB_QPT_XRC_TGT] = (IB_QP_ALT_SMAC |
620 IB_QP_VID |
621 IB_QP_ALT_VID)
622 }
8a51866f
RD
623 }
624 },
625 [IB_QPS_RTR] = {
626 [IB_QPS_RESET] = { .valid = 1 },
627 [IB_QPS_ERR] = { .valid = 1 },
628 [IB_QPS_RTS] = {
629 .valid = 1,
630 .req_param = {
631 [IB_QPT_UD] = IB_QP_SQ_PSN,
632 [IB_QPT_UC] = IB_QP_SQ_PSN,
633 [IB_QPT_RC] = (IB_QP_TIMEOUT |
634 IB_QP_RETRY_CNT |
635 IB_QP_RNR_RETRY |
636 IB_QP_SQ_PSN |
637 IB_QP_MAX_QP_RD_ATOMIC),
b42b63cf
SH
638 [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
639 IB_QP_RETRY_CNT |
640 IB_QP_RNR_RETRY |
641 IB_QP_SQ_PSN |
642 IB_QP_MAX_QP_RD_ATOMIC),
643 [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
644 IB_QP_SQ_PSN),
8a51866f
RD
645 [IB_QPT_SMI] = IB_QP_SQ_PSN,
646 [IB_QPT_GSI] = IB_QP_SQ_PSN,
647 },
648 .opt_param = {
649 [IB_QPT_UD] = (IB_QP_CUR_STATE |
650 IB_QP_QKEY),
651 [IB_QPT_UC] = (IB_QP_CUR_STATE |
652 IB_QP_ALT_PATH |
653 IB_QP_ACCESS_FLAGS |
654 IB_QP_PATH_MIG_STATE),
655 [IB_QPT_RC] = (IB_QP_CUR_STATE |
656 IB_QP_ALT_PATH |
657 IB_QP_ACCESS_FLAGS |
658 IB_QP_MIN_RNR_TIMER |
659 IB_QP_PATH_MIG_STATE),
b42b63cf
SH
660 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
661 IB_QP_ALT_PATH |
662 IB_QP_ACCESS_FLAGS |
663 IB_QP_PATH_MIG_STATE),
664 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
665 IB_QP_ALT_PATH |
666 IB_QP_ACCESS_FLAGS |
667 IB_QP_MIN_RNR_TIMER |
668 IB_QP_PATH_MIG_STATE),
8a51866f
RD
669 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
670 IB_QP_QKEY),
671 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
672 IB_QP_QKEY),
673 }
674 }
675 },
676 [IB_QPS_RTS] = {
677 [IB_QPS_RESET] = { .valid = 1 },
678 [IB_QPS_ERR] = { .valid = 1 },
679 [IB_QPS_RTS] = {
680 .valid = 1,
681 .opt_param = {
682 [IB_QPT_UD] = (IB_QP_CUR_STATE |
683 IB_QP_QKEY),
4546d31d
DB
684 [IB_QPT_UC] = (IB_QP_CUR_STATE |
685 IB_QP_ACCESS_FLAGS |
8a51866f
RD
686 IB_QP_ALT_PATH |
687 IB_QP_PATH_MIG_STATE),
4546d31d
DB
688 [IB_QPT_RC] = (IB_QP_CUR_STATE |
689 IB_QP_ACCESS_FLAGS |
8a51866f
RD
690 IB_QP_ALT_PATH |
691 IB_QP_PATH_MIG_STATE |
692 IB_QP_MIN_RNR_TIMER),
b42b63cf
SH
693 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
694 IB_QP_ACCESS_FLAGS |
695 IB_QP_ALT_PATH |
696 IB_QP_PATH_MIG_STATE),
697 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
698 IB_QP_ACCESS_FLAGS |
699 IB_QP_ALT_PATH |
700 IB_QP_PATH_MIG_STATE |
701 IB_QP_MIN_RNR_TIMER),
8a51866f
RD
702 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
703 IB_QP_QKEY),
704 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
705 IB_QP_QKEY),
706 }
707 },
708 [IB_QPS_SQD] = {
709 .valid = 1,
710 .opt_param = {
711 [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY,
712 [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
713 [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
b42b63cf
SH
714 [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
715 [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
8a51866f
RD
716 [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
717 [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
718 }
719 },
720 },
721 [IB_QPS_SQD] = {
722 [IB_QPS_RESET] = { .valid = 1 },
723 [IB_QPS_ERR] = { .valid = 1 },
724 [IB_QPS_RTS] = {
725 .valid = 1,
726 .opt_param = {
727 [IB_QPT_UD] = (IB_QP_CUR_STATE |
728 IB_QP_QKEY),
729 [IB_QPT_UC] = (IB_QP_CUR_STATE |
730 IB_QP_ALT_PATH |
731 IB_QP_ACCESS_FLAGS |
732 IB_QP_PATH_MIG_STATE),
733 [IB_QPT_RC] = (IB_QP_CUR_STATE |
734 IB_QP_ALT_PATH |
735 IB_QP_ACCESS_FLAGS |
736 IB_QP_MIN_RNR_TIMER |
737 IB_QP_PATH_MIG_STATE),
b42b63cf
SH
738 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
739 IB_QP_ALT_PATH |
740 IB_QP_ACCESS_FLAGS |
741 IB_QP_PATH_MIG_STATE),
742 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
743 IB_QP_ALT_PATH |
744 IB_QP_ACCESS_FLAGS |
745 IB_QP_MIN_RNR_TIMER |
746 IB_QP_PATH_MIG_STATE),
8a51866f
RD
747 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
748 IB_QP_QKEY),
749 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
750 IB_QP_QKEY),
751 }
752 },
753 [IB_QPS_SQD] = {
754 .valid = 1,
755 .opt_param = {
756 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
757 IB_QP_QKEY),
758 [IB_QPT_UC] = (IB_QP_AV |
8a51866f
RD
759 IB_QP_ALT_PATH |
760 IB_QP_ACCESS_FLAGS |
761 IB_QP_PKEY_INDEX |
762 IB_QP_PATH_MIG_STATE),
763 [IB_QPT_RC] = (IB_QP_PORT |
764 IB_QP_AV |
765 IB_QP_TIMEOUT |
766 IB_QP_RETRY_CNT |
767 IB_QP_RNR_RETRY |
768 IB_QP_MAX_QP_RD_ATOMIC |
769 IB_QP_MAX_DEST_RD_ATOMIC |
8a51866f
RD
770 IB_QP_ALT_PATH |
771 IB_QP_ACCESS_FLAGS |
772 IB_QP_PKEY_INDEX |
773 IB_QP_MIN_RNR_TIMER |
774 IB_QP_PATH_MIG_STATE),
b42b63cf
SH
775 [IB_QPT_XRC_INI] = (IB_QP_PORT |
776 IB_QP_AV |
777 IB_QP_TIMEOUT |
778 IB_QP_RETRY_CNT |
779 IB_QP_RNR_RETRY |
780 IB_QP_MAX_QP_RD_ATOMIC |
781 IB_QP_ALT_PATH |
782 IB_QP_ACCESS_FLAGS |
783 IB_QP_PKEY_INDEX |
784 IB_QP_PATH_MIG_STATE),
785 [IB_QPT_XRC_TGT] = (IB_QP_PORT |
786 IB_QP_AV |
787 IB_QP_TIMEOUT |
788 IB_QP_MAX_DEST_RD_ATOMIC |
789 IB_QP_ALT_PATH |
790 IB_QP_ACCESS_FLAGS |
791 IB_QP_PKEY_INDEX |
792 IB_QP_MIN_RNR_TIMER |
793 IB_QP_PATH_MIG_STATE),
8a51866f
RD
794 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
795 IB_QP_QKEY),
796 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
797 IB_QP_QKEY),
798 }
799 }
800 },
801 [IB_QPS_SQE] = {
802 [IB_QPS_RESET] = { .valid = 1 },
803 [IB_QPS_ERR] = { .valid = 1 },
804 [IB_QPS_RTS] = {
805 .valid = 1,
806 .opt_param = {
807 [IB_QPT_UD] = (IB_QP_CUR_STATE |
808 IB_QP_QKEY),
809 [IB_QPT_UC] = (IB_QP_CUR_STATE |
810 IB_QP_ACCESS_FLAGS),
811 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
812 IB_QP_QKEY),
813 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
814 IB_QP_QKEY),
815 }
816 }
817 },
818 [IB_QPS_ERR] = {
819 [IB_QPS_RESET] = { .valid = 1 },
820 [IB_QPS_ERR] = { .valid = 1 }
821 }
822};
823
824int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
dd5f03be
MB
825 enum ib_qp_type type, enum ib_qp_attr_mask mask,
826 enum rdma_link_layer ll)
8a51866f
RD
827{
828 enum ib_qp_attr_mask req_param, opt_param;
829
830 if (cur_state < 0 || cur_state > IB_QPS_ERR ||
831 next_state < 0 || next_state > IB_QPS_ERR)
832 return 0;
833
834 if (mask & IB_QP_CUR_STATE &&
835 cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
836 cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
837 return 0;
838
839 if (!qp_state_table[cur_state][next_state].valid)
840 return 0;
841
842 req_param = qp_state_table[cur_state][next_state].req_param[type];
843 opt_param = qp_state_table[cur_state][next_state].opt_param[type];
844
dd5f03be
MB
845 if (ll == IB_LINK_LAYER_ETHERNET) {
846 req_param |= qp_state_table[cur_state][next_state].
847 req_param_add_eth[type];
848 opt_param |= qp_state_table[cur_state][next_state].
849 opt_param_add_eth[type];
850 }
851
8a51866f
RD
852 if ((mask & req_param) != req_param)
853 return 0;
854
855 if (mask & ~(req_param | opt_param | IB_QP_STATE))
856 return 0;
857
858 return 1;
859}
860EXPORT_SYMBOL(ib_modify_qp_is_ok);
861
1da177e4
LT
862int ib_modify_qp(struct ib_qp *qp,
863 struct ib_qp_attr *qp_attr,
864 int qp_attr_mask)
865{
0e0ec7e0 866 return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
1da177e4
LT
867}
868EXPORT_SYMBOL(ib_modify_qp);
869
870int ib_query_qp(struct ib_qp *qp,
871 struct ib_qp_attr *qp_attr,
872 int qp_attr_mask,
873 struct ib_qp_init_attr *qp_init_attr)
874{
875 return qp->device->query_qp ?
0e0ec7e0 876 qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
1da177e4
LT
877 -ENOSYS;
878}
879EXPORT_SYMBOL(ib_query_qp);
880
0e0ec7e0
SH
881int ib_close_qp(struct ib_qp *qp)
882{
883 struct ib_qp *real_qp;
884 unsigned long flags;
885
886 real_qp = qp->real_qp;
887 if (real_qp == qp)
888 return -EINVAL;
889
890 spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
891 list_del(&qp->open_list);
892 spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
893
894 atomic_dec(&real_qp->usecnt);
895 kfree(qp);
896
897 return 0;
898}
899EXPORT_SYMBOL(ib_close_qp);
900
901static int __ib_destroy_shared_qp(struct ib_qp *qp)
902{
903 struct ib_xrcd *xrcd;
904 struct ib_qp *real_qp;
905 int ret;
906
907 real_qp = qp->real_qp;
908 xrcd = real_qp->xrcd;
909
910 mutex_lock(&xrcd->tgt_qp_mutex);
911 ib_close_qp(qp);
912 if (atomic_read(&real_qp->usecnt) == 0)
913 list_del(&real_qp->xrcd_list);
914 else
915 real_qp = NULL;
916 mutex_unlock(&xrcd->tgt_qp_mutex);
917
918 if (real_qp) {
919 ret = ib_destroy_qp(real_qp);
920 if (!ret)
921 atomic_dec(&xrcd->usecnt);
922 else
923 __ib_insert_xrcd_qp(xrcd, real_qp);
924 }
925
926 return 0;
927}
928
1da177e4
LT
929int ib_destroy_qp(struct ib_qp *qp)
930{
931 struct ib_pd *pd;
932 struct ib_cq *scq, *rcq;
933 struct ib_srq *srq;
934 int ret;
935
0e0ec7e0
SH
936 if (atomic_read(&qp->usecnt))
937 return -EBUSY;
938
939 if (qp->real_qp != qp)
940 return __ib_destroy_shared_qp(qp);
941
b42b63cf
SH
942 pd = qp->pd;
943 scq = qp->send_cq;
944 rcq = qp->recv_cq;
945 srq = qp->srq;
1da177e4
LT
946
947 ret = qp->device->destroy_qp(qp);
948 if (!ret) {
b42b63cf
SH
949 if (pd)
950 atomic_dec(&pd->usecnt);
951 if (scq)
952 atomic_dec(&scq->usecnt);
953 if (rcq)
954 atomic_dec(&rcq->usecnt);
1da177e4
LT
955 if (srq)
956 atomic_dec(&srq->usecnt);
957 }
958
959 return ret;
960}
961EXPORT_SYMBOL(ib_destroy_qp);
962
963/* Completion queues */
964
965struct ib_cq *ib_create_cq(struct ib_device *device,
966 ib_comp_handler comp_handler,
967 void (*event_handler)(struct ib_event *, void *),
f4fd0b22 968 void *cq_context, int cqe, int comp_vector)
1da177e4
LT
969{
970 struct ib_cq *cq;
971
f4fd0b22 972 cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);
1da177e4
LT
973
974 if (!IS_ERR(cq)) {
975 cq->device = device;
b5e81bf5 976 cq->uobject = NULL;
1da177e4
LT
977 cq->comp_handler = comp_handler;
978 cq->event_handler = event_handler;
979 cq->cq_context = cq_context;
980 atomic_set(&cq->usecnt, 0);
981 }
982
983 return cq;
984}
985EXPORT_SYMBOL(ib_create_cq);
986
2dd57162
EC
987int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
988{
989 return cq->device->modify_cq ?
990 cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
991}
992EXPORT_SYMBOL(ib_modify_cq);
993
1da177e4
LT
994int ib_destroy_cq(struct ib_cq *cq)
995{
996 if (atomic_read(&cq->usecnt))
997 return -EBUSY;
998
999 return cq->device->destroy_cq(cq);
1000}
1001EXPORT_SYMBOL(ib_destroy_cq);
1002
a74cd4af 1003int ib_resize_cq(struct ib_cq *cq, int cqe)
1da177e4 1004{
40de2e54 1005 return cq->device->resize_cq ?
33b9b3ee 1006 cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
1da177e4
LT
1007}
1008EXPORT_SYMBOL(ib_resize_cq);
1009
1010/* Memory regions */
1011
1012struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
1013{
1014 struct ib_mr *mr;
1c636f80
EC
1015 int err;
1016
1017 err = ib_check_mr_access(mr_access_flags);
1018 if (err)
1019 return ERR_PTR(err);
1da177e4
LT
1020
1021 mr = pd->device->get_dma_mr(pd, mr_access_flags);
1022
1023 if (!IS_ERR(mr)) {
b5e81bf5
RD
1024 mr->device = pd->device;
1025 mr->pd = pd;
1026 mr->uobject = NULL;
1da177e4
LT
1027 atomic_inc(&pd->usecnt);
1028 atomic_set(&mr->usecnt, 0);
1029 }
1030
1031 return mr;
1032}
1033EXPORT_SYMBOL(ib_get_dma_mr);
1034
1035struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
1036 struct ib_phys_buf *phys_buf_array,
1037 int num_phys_buf,
1038 int mr_access_flags,
1039 u64 *iova_start)
1040{
1041 struct ib_mr *mr;
1c636f80
EC
1042 int err;
1043
1044 err = ib_check_mr_access(mr_access_flags);
1045 if (err)
1046 return ERR_PTR(err);
1da177e4 1047
7ce5eacb
DB
1048 if (!pd->device->reg_phys_mr)
1049 return ERR_PTR(-ENOSYS);
1050
1da177e4
LT
1051 mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
1052 mr_access_flags, iova_start);
1053
1054 if (!IS_ERR(mr)) {
b5e81bf5
RD
1055 mr->device = pd->device;
1056 mr->pd = pd;
1057 mr->uobject = NULL;
1da177e4
LT
1058 atomic_inc(&pd->usecnt);
1059 atomic_set(&mr->usecnt, 0);
1060 }
1061
1062 return mr;
1063}
1064EXPORT_SYMBOL(ib_reg_phys_mr);
1065
1066int ib_rereg_phys_mr(struct ib_mr *mr,
1067 int mr_rereg_mask,
1068 struct ib_pd *pd,
1069 struct ib_phys_buf *phys_buf_array,
1070 int num_phys_buf,
1071 int mr_access_flags,
1072 u64 *iova_start)
1073{
1074 struct ib_pd *old_pd;
1075 int ret;
1076
1c636f80
EC
1077 ret = ib_check_mr_access(mr_access_flags);
1078 if (ret)
1079 return ret;
1080
1da177e4
LT
1081 if (!mr->device->rereg_phys_mr)
1082 return -ENOSYS;
1083
1084 if (atomic_read(&mr->usecnt))
1085 return -EBUSY;
1086
1087 old_pd = mr->pd;
1088
1089 ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
1090 phys_buf_array, num_phys_buf,
1091 mr_access_flags, iova_start);
1092
1093 if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
1094 atomic_dec(&old_pd->usecnt);
1095 atomic_inc(&pd->usecnt);
1096 }
1097
1098 return ret;
1099}
1100EXPORT_SYMBOL(ib_rereg_phys_mr);
1101
1102int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
1103{
1104 return mr->device->query_mr ?
1105 mr->device->query_mr(mr, mr_attr) : -ENOSYS;
1106}
1107EXPORT_SYMBOL(ib_query_mr);
1108
1109int ib_dereg_mr(struct ib_mr *mr)
1110{
1111 struct ib_pd *pd;
1112 int ret;
1113
1114 if (atomic_read(&mr->usecnt))
1115 return -EBUSY;
1116
1117 pd = mr->pd;
1118 ret = mr->device->dereg_mr(mr);
1119 if (!ret)
1120 atomic_dec(&pd->usecnt);
1121
1122 return ret;
1123}
1124EXPORT_SYMBOL(ib_dereg_mr);
1125
00f7ec36
SW
1126struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
1127{
1128 struct ib_mr *mr;
1129
1130 if (!pd->device->alloc_fast_reg_mr)
1131 return ERR_PTR(-ENOSYS);
1132
1133 mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len);
1134
1135 if (!IS_ERR(mr)) {
1136 mr->device = pd->device;
1137 mr->pd = pd;
1138 mr->uobject = NULL;
1139 atomic_inc(&pd->usecnt);
1140 atomic_set(&mr->usecnt, 0);
1141 }
1142
1143 return mr;
1144}
1145EXPORT_SYMBOL(ib_alloc_fast_reg_mr);
1146
/*
 * ib_alloc_fast_reg_page_list - Allocate a page list for use with fast
 * register work requests, holding up to @max_page_list_len page addresses.
 *
 * Returns the page list or an ERR_PTR (-ENOSYS if unsupported).
 * Free with ib_free_fast_reg_page_list().
 */
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
							  int max_page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	if (!device->alloc_fast_reg_page_list)
		return ERR_PTR(-ENOSYS);

	page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);

	if (!IS_ERR(page_list)) {
		/* Record owner and capacity for the free path and for users. */
		page_list->device = device;
		page_list->max_page_list_len = max_page_list_len;
	}

	return page_list;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);
1165
1166void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
1167{
1168 page_list->device->free_fast_reg_page_list(page_list);
1169}
1170EXPORT_SYMBOL(ib_free_fast_reg_page_list);
1171
/* Memory windows */

/*
 * ib_alloc_mw - Allocate a memory window of the given @type on protection
 * domain @pd.  On success the core-owned fields are initialized and a
 * reference is taken on @pd.
 *
 * Returns the new MW or an ERR_PTR (-ENOSYS if the device lacks support).
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd, type);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		mw->type    = type;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);
1193
1194int ib_dealloc_mw(struct ib_mw *mw)
1195{
1196 struct ib_pd *pd;
1197 int ret;
1198
1199 pd = mw->pd;
1200 ret = mw->device->dealloc_mw(mw);
1201 if (!ret)
1202 atomic_dec(&pd->usecnt);
1203
1204 return ret;
1205}
1206EXPORT_SYMBOL(ib_dealloc_mw);
1207
/* "Fast" memory regions */

/*
 * ib_alloc_fmr - Allocate a fast memory region on @pd with the given
 * access flags and FMR attributes.  On success a reference is taken
 * on @pd.
 *
 * Returns the new FMR or an ERR_PTR (-ENOSYS if unsupported).
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);
1229
1230int ib_unmap_fmr(struct list_head *fmr_list)
1231{
1232 struct ib_fmr *fmr;
1233
1234 if (list_empty(fmr_list))
1235 return 0;
1236
1237 fmr = list_entry(fmr_list->next, struct ib_fmr, list);
1238 return fmr->device->unmap_fmr(fmr_list);
1239}
1240EXPORT_SYMBOL(ib_unmap_fmr);
1241
1242int ib_dealloc_fmr(struct ib_fmr *fmr)
1243{
1244 struct ib_pd *pd;
1245 int ret;
1246
1247 pd = fmr->pd;
1248 ret = fmr->device->dealloc_fmr(fmr);
1249 if (!ret)
1250 atomic_dec(&pd->usecnt);
1251
1252 return ret;
1253}
1254EXPORT_SYMBOL(ib_dealloc_fmr);
1255
/* Multicast groups */

/*
 * ib_attach_mcast - Attach a UD QP to the multicast group identified by
 * @gid/@lid.  On success the QP's usecnt is bumped so it cannot be
 * destroyed while still attached.
 *
 * Returns -EINVAL for a non-multicast GID or a non-UD QP, -ENOSYS if the
 * device lacks support, otherwise the driver's result.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;
	/* Multicast GIDs have the 0xff prefix, and only UD QPs may join. */
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);
1273
/*
 * ib_detach_mcast - Detach a UD QP from the multicast group identified by
 * @gid/@lid, releasing the usecnt reference taken by ib_attach_mcast().
 *
 * Returns -EINVAL for a non-multicast GID or a non-UD QP, -ENOSYS if the
 * device lacks support, otherwise the driver's result.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;
	/* Same validity rules as attach: multicast GID prefix 0xff, UD QP. */
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);
/*
 * ib_alloc_xrcd - Allocate an XRC domain (kernel use: no uverbs context or
 * udata, hence the NULL, NULL arguments; inode is NULL because no
 * filesystem handle backs a kernel XRCD).
 *
 * Returns the new XRCD or an ERR_PTR (-ENOSYS if unsupported).
 */
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		/* Tracks XRC target QPs created on this domain; torn down
		 * in ib_dealloc_xrcd(). */
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);
1309
/*
 * ib_dealloc_xrcd - Destroy an XRC domain.  Refuses (-EBUSY) while other
 * objects still reference it; otherwise destroys any remaining XRC target
 * QPs on the domain before handing it to the driver for deallocation.
 *
 * Stops and returns the error of the first QP destroy that fails, leaving
 * the XRCD (and any remaining QPs) intact.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	/* Destroy target QPs one at a time from the head of the list;
	 * ib_destroy_qp() presumably unlinks each from tgt_qp_list, or this
	 * loop would never terminate -- confirm in ib_destroy_qp(). */
	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);
319a441d
HHZ
1328
1329struct ib_flow *ib_create_flow(struct ib_qp *qp,
1330 struct ib_flow_attr *flow_attr,
1331 int domain)
1332{
1333 struct ib_flow *flow_id;
1334 if (!qp->device->create_flow)
1335 return ERR_PTR(-ENOSYS);
1336
1337 flow_id = qp->device->create_flow(qp, flow_attr, domain);
1338 if (!IS_ERR(flow_id))
1339 atomic_inc(&qp->usecnt);
1340 return flow_id;
1341}
1342EXPORT_SYMBOL(ib_create_flow);
1343
1344int ib_destroy_flow(struct ib_flow *flow_id)
1345{
1346 int err;
1347 struct ib_qp *qp = flow_id->qp;
1348
1349 err = qp->device->destroy_flow(flow_id);
1350 if (!err)
1351 atomic_dec(&qp->usecnt);
1352 return err;
1353}
1354EXPORT_SYMBOL(ib_destroy_flow);