/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_provider.c 4859 2006-01-09 21:55:10Z roland $
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <linux/mm.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_user.h"
#include "mthca_memfree.h"

static void init_query_mad(struct ib_smp *mad)
{
        mad->base_version = 1;
        mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        mad->class_version = 1;
        mad->method = IB_MGMT_METHOD_GET;
}

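/*
 * Device and port queries: identification data (GUIDs, LIDs, P_Keys,
 * vendor/part IDs) is fetched from the firmware with MAD_IFC SMP
 * queries built by init_query_mad(); the remaining attributes come
 * from the limits probed at driver initialization.
 */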
static int mthca_query_device(struct ib_device *ibdev,
                              struct ib_device_attr *props)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        struct mthca_dev *mdev = to_mdev(ibdev);
        u8 status;

        in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof *props);

        props->fw_ver = mdev->fw_ver;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mthca_MAD_IFC(mdev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        props->device_cap_flags = mdev->device_cap_flags;
        props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
                0xffffff;
        props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
        props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

        props->max_mr_size = ~0ull;
        props->page_size_cap = mdev->limits.page_size_cap;
        props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps;
        props->max_qp_wr = mdev->limits.max_wqes;
        props->max_sge = mdev->limits.max_sg;
        props->max_cq = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
        props->max_cqe = mdev->limits.max_cqes;
        props->max_mr = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
        props->max_pd = mdev->limits.num_pds - mdev->limits.reserved_pds;
        props->max_qp_rd_atom = 1 << mdev->qp_table.rdb_shift;
        props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
        props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
        props->max_srq = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
        props->max_srq_wr = mdev->limits.max_srq_wqes;
        props->max_srq_sge = mdev->limits.max_srq_sge;
        props->local_ca_ack_delay = mdev->limits.local_ca_ack_delay;
        props->atomic_cap = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
                            IB_ATOMIC_HCA : IB_ATOMIC_NONE;
        props->max_pkeys = mdev->limits.pkey_table_len;
        props->max_mcast_grp = mdev->limits.num_mgms + mdev->limits.num_amgms;
        props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;

        err = 0;
out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mthca_query_port(struct ib_device *ibdev,
                            u8 port, struct ib_port_attr *props)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof *props);

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
        props->lmc = out_mad->data[34] & 0x7;
        props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
        props->sm_sl = out_mad->data[36] & 0xf;
        props->state = out_mad->data[32] & 0xf;
        props->phys_state = out_mad->data[33] >> 4;
        props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
        props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len;
        props->max_msg_sz = 0x80000000;
        props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len;
        props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
        props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
        props->active_width = out_mad->data[31] & 0xf;
        props->active_speed = out_mad->data[35] >> 4;
        props->max_mtu = out_mad->data[41] & 0xf;
        props->active_mtu = out_mad->data[36] >> 4;
        props->subnet_timeout = out_mad->data[51] & 0x1f;

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mthca_modify_device(struct ib_device *ibdev,
                               int mask,
                               struct ib_device_modify *props)
{
        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                return -EOPNOTSUPP;

        if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
                if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
                        return -ERESTARTSYS;
                memcpy(ibdev->node_desc, props->node_desc, 64);
                mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
        }

        return 0;
}

static int mthca_modify_port(struct ib_device *ibdev,
                             u8 port, int port_modify_mask,
                             struct ib_port_modify *props)
{
        struct mthca_set_ib_param set_ib;
        struct ib_port_attr attr;
        int err;
        u8 status;

        if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
                return -ERESTARTSYS;

        err = mthca_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        set_ib.set_si_guid = 0;
        set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

        set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

out:
        mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
        return err;
}

static int mthca_query_pkey(struct ib_device *ibdev,
                            u8 port, u16 index, u16 *pkey)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod = cpu_to_be32(index / 32);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mthca_query_gid(struct ib_device *ibdev, u8 port,
                           int index, union ib_gid *gid)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        memcpy(gid->raw, out_mad->data + 8, 8);

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod = cpu_to_be32(index / 8);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        /* GUIDInfo holds eight 8-byte GUIDs per block */
        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

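/*
 * A userspace context owns one UAR (doorbell page) and a table used to
 * map userspace doorbell records; the UAR context size reported back
 * in mthca_alloc_ucontext_resp is nonzero only on mem-free
 * (Arbel-style) HCAs.
 */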
static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
                                                struct ib_udata *udata)
{
        struct mthca_alloc_ucontext_resp uresp;
        struct mthca_ucontext *context;
        int err;

        memset(&uresp, 0, sizeof uresp);

        uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
        if (mthca_is_memfree(to_mdev(ibdev)))
                uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
        else
                uresp.uarc_size = 0;

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
        if (err) {
                kfree(context);
                return ERR_PTR(err);
        }

        context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
        if (IS_ERR(context->db_tab)) {
                err = PTR_ERR(context->db_tab);
                mthca_uar_free(to_mdev(ibdev), &context->uar);
                kfree(context);
                return ERR_PTR(err);
        }

        if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
                mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
                mthca_uar_free(to_mdev(ibdev), &context->uar);
                kfree(context);
                return ERR_PTR(-EFAULT);
        }

        return &context->ibucontext;
}

static int mthca_dealloc_ucontext(struct ib_ucontext *context)
{
        mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
                                  to_mucontext(context)->db_tab);
        mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
        kfree(to_mucontext(context));

        return 0;
}

static int mthca_mmap_uar(struct ib_ucontext *context,
                          struct vm_area_struct *vma)
{
        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (io_remap_pfn_range(vma, vma->vm_start,
                               to_mucontext(context)->uar.pfn,
                               PAGE_SIZE, vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}

static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
                                    struct ib_ucontext *context,
                                    struct ib_udata *udata)
{
        struct mthca_pd *pd;
        int err;

        pd = kmalloc(sizeof *pd, GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
        if (err) {
                kfree(pd);
                return ERR_PTR(err);
        }

        if (context) {
                if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
                        mthca_pd_free(to_mdev(ibdev), pd);
                        kfree(pd);
                        return ERR_PTR(-EFAULT);
                }
        }

        return &pd->ibpd;
}

static int mthca_dealloc_pd(struct ib_pd *pd)
{
        mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
        kfree(pd);

        return 0;
}

static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
                                     struct ib_ah_attr *ah_attr)
{
        int err;
        struct mthca_ah *ah;

        ah = kmalloc(sizeof *ah, GFP_ATOMIC);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
        if (err) {
                kfree(ah);
                return ERR_PTR(err);
        }

        return &ah->ibah;
}

static int mthca_ah_destroy(struct ib_ah *ah)
{
        mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
        kfree(ah);

        return 0;
}

static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
                                       struct ib_srq_init_attr *init_attr,
                                       struct ib_udata *udata)
{
        struct mthca_create_srq ucmd;
        struct mthca_ucontext *context = NULL;
        struct mthca_srq *srq;
        int err;

        srq = kmalloc(sizeof *srq, GFP_KERNEL);
        if (!srq)
                return ERR_PTR(-ENOMEM);

        if (pd->uobject) {
                context = to_mucontext(pd->uobject->context);

                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        err = -EFAULT;
                        goto err_free;
                }

                err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                        context->db_tab, ucmd.db_index,
                                        ucmd.db_page);

                if (err)
                        goto err_free;

                srq->mr.ibmr.lkey = ucmd.lkey;
                srq->db_index = ucmd.db_index;
        }

        err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
                              &init_attr->attr, srq);

        if (err && pd->uobject)
                mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
                                    context->db_tab, ucmd.db_index);

        if (err)
                goto err_free;

        if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
                mthca_free_srq(to_mdev(pd->device), srq);
                err = -EFAULT;
                goto err_free;
        }

        return &srq->ibsrq;

err_free:
        kfree(srq);

        return ERR_PTR(err);
}

static int mthca_destroy_srq(struct ib_srq *srq)
{
        struct mthca_ucontext *context;

        if (srq->uobject) {
                context = to_mucontext(srq->uobject->context);

                mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
                                    context->db_tab, to_msrq(srq)->db_index);
        }

        mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
        kfree(srq);

        return 0;
}

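/*
 * QP creation: for userspace QPs the send and receive doorbell records
 * passed in struct mthca_create_qp are mapped first and unmapped again
 * if allocation fails; special QPs (SMI/GSI) can only be created from
 * the kernel.
 */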
static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
                                     struct ib_qp_init_attr *init_attr,
                                     struct ib_udata *udata)
{
        struct mthca_create_qp ucmd;
        struct mthca_qp *qp;
        int err;

        switch (init_attr->qp_type) {
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
        {
                struct mthca_ucontext *context;

                qp = kmalloc(sizeof *qp, GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                if (pd->uobject) {
                        context = to_mucontext(pd->uobject->context);

                        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                                kfree(qp);
                                return ERR_PTR(-EFAULT);
                        }

                        err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                                context->db_tab,
                                                ucmd.sq_db_index, ucmd.sq_db_page);
                        if (err) {
                                kfree(qp);
                                return ERR_PTR(err);
                        }

                        err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                                context->db_tab,
                                                ucmd.rq_db_index, ucmd.rq_db_page);
                        if (err) {
                                mthca_unmap_user_db(to_mdev(pd->device),
                                                    &context->uar,
                                                    context->db_tab,
                                                    ucmd.sq_db_index);
                                kfree(qp);
                                return ERR_PTR(err);
                        }

                        qp->mr.ibmr.lkey = ucmd.lkey;
                        qp->sq.db_index = ucmd.sq_db_index;
                        qp->rq.db_index = ucmd.rq_db_index;
                }

                err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
                                     to_mcq(init_attr->send_cq),
                                     to_mcq(init_attr->recv_cq),
                                     init_attr->qp_type, init_attr->sq_sig_type,
                                     &init_attr->cap, qp);

                if (err && pd->uobject) {
                        context = to_mucontext(pd->uobject->context);

                        mthca_unmap_user_db(to_mdev(pd->device),
                                            &context->uar,
                                            context->db_tab,
                                            ucmd.sq_db_index);
                        mthca_unmap_user_db(to_mdev(pd->device),
                                            &context->uar,
                                            context->db_tab,
                                            ucmd.rq_db_index);
                }

                qp->ibqp.qp_num = qp->qpn;
                break;
        }
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        {
                /* Don't allow userspace to create special QPs */
                if (pd->uobject)
                        return ERR_PTR(-EINVAL);

                qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

                err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
                                      to_mcq(init_attr->send_cq),
                                      to_mcq(init_attr->recv_cq),
                                      init_attr->sq_sig_type, &init_attr->cap,
                                      qp->ibqp.qp_num, init_attr->port_num,
                                      to_msqp(qp));
                break;
        }
        default:
                /* Don't support raw QPs */
                return ERR_PTR(-ENOSYS);
        }

        if (err) {
                kfree(qp);
                return ERR_PTR(err);
        }

        init_attr->cap.max_send_wr = qp->sq.max;
        init_attr->cap.max_recv_wr = qp->rq.max;
        init_attr->cap.max_send_sge = qp->sq.max_gs;
        init_attr->cap.max_recv_sge = qp->rq.max_gs;
        init_attr->cap.max_inline_data = qp->max_inline_data;

        return &qp->ibqp;
}

static int mthca_destroy_qp(struct ib_qp *qp)
{
        if (qp->uobject) {
                mthca_unmap_user_db(to_mdev(qp->device),
                                    &to_mucontext(qp->uobject->context)->uar,
                                    to_mucontext(qp->uobject->context)->db_tab,
                                    to_mqp(qp)->sq.db_index);
                mthca_unmap_user_db(to_mdev(qp->device),
                                    &to_mucontext(qp->uobject->context)->uar,
                                    to_mucontext(qp->uobject->context)->db_tab,
                                    to_mqp(qp)->rq.db_index);
        }
        mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
        kfree(qp);
        return 0;
}

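/*
 * CQ creation: the requested size is rounded up to the next power of
 * two strictly greater than "entries", and for userspace CQs the
 * set_ci and arm doorbell records from struct mthca_create_cq are
 * mapped before the CQ itself is allocated.
 */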
static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
                                     struct ib_ucontext *context,
                                     struct ib_udata *udata)
{
        struct mthca_create_cq ucmd;
        struct mthca_cq *cq;
        int nent;
        int err;

        if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
                return ERR_PTR(-EINVAL);

        if (context) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                        return ERR_PTR(-EFAULT);

                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                        to_mucontext(context)->db_tab,
                                        ucmd.set_db_index, ucmd.set_db_page);
                if (err)
                        return ERR_PTR(err);

                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                        to_mucontext(context)->db_tab,
                                        ucmd.arm_db_index, ucmd.arm_db_page);
                if (err)
                        goto err_unmap_set;
        }

        cq = kmalloc(sizeof *cq, GFP_KERNEL);
        if (!cq) {
                err = -ENOMEM;
                goto err_unmap_arm;
        }

        if (context) {
                cq->buf.mr.ibmr.lkey = ucmd.lkey;
                cq->set_ci_db_index = ucmd.set_db_index;
                cq->arm_db_index = ucmd.arm_db_index;
        }

        for (nent = 1; nent <= entries; nent <<= 1)
                ; /* nothing */

        err = mthca_init_cq(to_mdev(ibdev), nent,
                            context ? to_mucontext(context) : NULL,
                            context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
                            cq);
        if (err)
                goto err_free;

        if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
                mthca_free_cq(to_mdev(ibdev), cq);
                err = -EFAULT;
                goto err_free;
        }

        cq->resize_buf = NULL;

        return &cq->ibcq;

err_free:
        kfree(cq);

err_unmap_arm:
        if (context)
                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                    to_mucontext(context)->db_tab, ucmd.arm_db_index);

err_unmap_set:
        if (context)
                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                    to_mucontext(context)->db_tab, ucmd.set_db_index);

        return ERR_PTR(err);
}

static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
                                  int entries)
{
        int ret;

        spin_lock_irq(&cq->lock);
        if (cq->resize_buf) {
                ret = -EBUSY;
                goto unlock;
        }

        cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
        if (!cq->resize_buf) {
                ret = -ENOMEM;
                goto unlock;
        }

        cq->resize_buf->state = CQ_RESIZE_ALLOC;

        ret = 0;

unlock:
        spin_unlock_irq(&cq->lock);

        if (ret)
                return ret;

        ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
        if (ret) {
                spin_lock_irq(&cq->lock);
                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                spin_unlock_irq(&cq->lock);
                return ret;
        }

        cq->resize_buf->cqe = entries - 1;

        spin_lock_irq(&cq->lock);
        cq->resize_buf->state = CQ_RESIZE_READY;
        spin_unlock_irq(&cq->lock);

        return 0;
}

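/*
 * CQ resize: for kernel CQs a resize buffer is allocated and completed
 * CQEs are copied across once the RESIZE_CQ firmware command succeeds;
 * for userspace CQs the new buffer is allocated by userspace and only
 * its lkey is passed down here.
 */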
static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(ibcq->device);
        struct mthca_cq *cq = to_mcq(ibcq);
        struct mthca_resize_cq ucmd;
        u32 lkey;
        u8 status;
        int ret;

        if (entries < 1 || entries > dev->limits.max_cqes)
                return -EINVAL;

        entries = roundup_pow_of_two(entries + 1);
        if (entries == ibcq->cqe + 1)
                return 0;

        if (cq->is_kernel) {
                ret = mthca_alloc_resize_buf(dev, cq, entries);
                if (ret)
                        return ret;
                lkey = cq->resize_buf->buf.mr.ibmr.lkey;
        } else {
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                        return -EFAULT;
                lkey = ucmd.lkey;
        }

        ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, long_log2(entries), &status);
        if (status)
                ret = -EINVAL;

        if (ret) {
                if (cq->resize_buf) {
                        mthca_free_cq_buf(dev, &cq->resize_buf->buf,
                                          cq->resize_buf->cqe);
                        kfree(cq->resize_buf);
                        spin_lock_irq(&cq->lock);
                        cq->resize_buf = NULL;
                        spin_unlock_irq(&cq->lock);
                }
                return ret;
        }

        if (cq->is_kernel) {
                struct mthca_cq_buf tbuf;
                int tcqe;

                spin_lock_irq(&cq->lock);
                if (cq->resize_buf->state == CQ_RESIZE_READY) {
                        mthca_cq_resize_copy_cqes(cq);
                        tbuf = cq->buf;
                        tcqe = cq->ibcq.cqe;
                        cq->buf = cq->resize_buf->buf;
                        cq->ibcq.cqe = cq->resize_buf->cqe;
                } else {
                        tbuf = cq->resize_buf->buf;
                        tcqe = cq->resize_buf->cqe;
                }

                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                spin_unlock_irq(&cq->lock);

                mthca_free_cq_buf(dev, &tbuf, tcqe);
        } else
                ibcq->cqe = entries - 1;

        return 0;
}

static int mthca_destroy_cq(struct ib_cq *cq)
{
        if (cq->uobject) {
                mthca_unmap_user_db(to_mdev(cq->device),
                                    &to_mucontext(cq->uobject->context)->uar,
                                    to_mucontext(cq->uobject->context)->db_tab,
                                    to_mcq(cq)->arm_db_index);
                mthca_unmap_user_db(to_mdev(cq->device),
                                    &to_mucontext(cq->uobject->context)->uar,
                                    to_mucontext(cq->uobject->context)->db_tab,
                                    to_mcq(cq)->set_ci_db_index);
        }
        mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
        kfree(cq);

        return 0;
}

static inline u32 convert_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC : 0) |
               (acc & IB_ACCESS_REMOTE_WRITE ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ ? MTHCA_MPT_FLAG_REMOTE_READ : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0) |
               MTHCA_MPT_FLAG_LOCAL_READ;
}

static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mthca_mr *mr;
        int err;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mthca_mr_alloc_notrans(to_mdev(pd->device),
                                     to_mpd(pd)->pd_num,
                                     convert_access(acc), mr);

        if (err) {
                kfree(mr);
                return ERR_PTR(err);
        }

        return &mr->ibmr;
}

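/*
 * Physical MR registration: the start IOVA must share its in-page
 * offset with the first buffer and interior buffer boundaries must be
 * page aligned; the largest page shift that still covers every buffer
 * is then used to build the page list handed to the firmware.
 */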
static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
                                       struct ib_phys_buf *buffer_list,
                                       int num_phys_buf,
                                       int acc,
                                       u64 *iova_start)
{
        struct mthca_mr *mr;
        u64 *page_list;
        u64 total_size;
        u64 mask;
        int shift;
        int npages;
        int err;
        int i, j, n;

        /* First check that we have enough alignment */
        if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK))
                return ERR_PTR(-EINVAL);

        mask = 0;
        total_size = 0;
        for (i = 0; i < num_phys_buf; ++i) {
                if (i != 0)
                        mask |= buffer_list[i].addr;
                if (i != num_phys_buf - 1)
                        mask |= buffer_list[i].addr + buffer_list[i].size;

                total_size += buffer_list[i].size;
        }

        if (mask & ~PAGE_MASK)
                return ERR_PTR(-EINVAL);

        /* Find largest page shift we can use to cover buffers */
        for (shift = PAGE_SHIFT; shift < 31; ++shift)
                if (num_phys_buf > 1) {
                        if ((1ULL << shift) & mask)
                                break;
                } else {
                        if (1ULL << shift >=
                            buffer_list[0].size +
                            (buffer_list[0].addr & ((1ULL << shift) - 1)))
                                break;
                }

        buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
        buffer_list[0].addr &= ~0ull << shift;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        npages = 0;
        for (i = 0; i < num_phys_buf; ++i)
                npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;

        if (!npages)
                return &mr->ibmr;

        page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
        if (!page_list) {
                kfree(mr);
                return ERR_PTR(-ENOMEM);
        }

        n = 0;
        for (i = 0; i < num_phys_buf; ++i)
                for (j = 0;
                     j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
                     ++j)
                        page_list[n++] = buffer_list[i].addr + ((u64) j << shift);

        mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) "
                  "in PD %x; shift %d, npages %d.\n",
                  (unsigned long long) buffer_list[0].addr,
                  (unsigned long long) *iova_start,
                  to_mpd(pd)->pd_num,
                  shift, npages);

        err = mthca_mr_alloc_phys(to_mdev(pd->device),
                                  to_mpd(pd)->pd_num,
                                  page_list, shift, npages,
                                  *iova_start, total_size,
                                  convert_access(acc), mr);

        if (err) {
                kfree(page_list);
                kfree(mr);
                return ERR_PTR(err);
        }

        kfree(page_list);
        return &mr->ibmr;
}

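/*
 * Userspace MR registration: walk the pinned umem chunks and write the
 * DMA addresses into the MTT in batches of one page of entries, minus
 * the two slots reserved for the WRITE_MTT mailbox header.
 */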
static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
                                       int acc, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(pd->device);
        struct ib_umem_chunk *chunk;
        struct mthca_mr *mr;
        u64 *pages;
        int shift, n, len;
        int i, j, k;
        int err = 0;

        shift = ffs(region->page_size) - 1;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        n = 0;
        list_for_each_entry(chunk, &region->chunk_list, list)
                n += chunk->nents;

        mr->mtt = mthca_alloc_mtt(dev, n);
        if (IS_ERR(mr->mtt)) {
                err = PTR_ERR(mr->mtt);
                goto err;
        }

        pages = (u64 *) __get_free_page(GFP_KERNEL);
        if (!pages) {
                err = -ENOMEM;
                goto err_mtt;
        }

        i = n = 0;

        list_for_each_entry(chunk, &region->chunk_list, list)
                for (j = 0; j < chunk->nmap; ++j) {
                        len = sg_dma_len(&chunk->page_list[j]) >> shift;
                        for (k = 0; k < len; ++k) {
                                pages[i++] = sg_dma_address(&chunk->page_list[j]) +
                                        region->page_size * k;
                                /*
                                 * Be friendly to WRITE_MTT command
                                 * and leave two empty slots for the
                                 * index and reserved fields of the
                                 * mailbox.
                                 */
                                if (i == PAGE_SIZE / sizeof (u64) - 2) {
                                        err = mthca_write_mtt(dev, mr->mtt,
                                                              n, pages, i);
                                        if (err)
                                                goto mtt_done;
                                        n += i;
                                        i = 0;
                                }
                        }
                }

        if (i)
                err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
mtt_done:
        free_page((unsigned long) pages);
        if (err)
                goto err_mtt;

        err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, region->virt_base,
                             region->length, convert_access(acc), mr);

        if (err)
                goto err_mtt;

        return &mr->ibmr;

err_mtt:
        mthca_free_mtt(dev, mr->mtt);

err:
        kfree(mr);
        return ERR_PTR(err);
}

static int mthca_dereg_mr(struct ib_mr *mr)
{
        struct mthca_mr *mmr = to_mmr(mr);
        mthca_free_mr(to_mdev(mr->device), mmr);
        kfree(mmr);
        return 0;
}

static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                                      struct ib_fmr_attr *fmr_attr)
{
        struct mthca_fmr *fmr;
        int err;

        fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
        if (!fmr)
                return ERR_PTR(-ENOMEM);

        memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
        err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
                              convert_access(mr_access_flags), fmr);

        if (err) {
                kfree(fmr);
                return ERR_PTR(err);
        }

        return &fmr->ibmr;
}

static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
        struct mthca_fmr *mfmr = to_mfmr(fmr);
        int err;

        err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
        if (err)
                return err;

        kfree(mfmr);
        return 0;
}

static int mthca_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *fmr;
        int err;
        u8 status;
        struct mthca_dev *mdev = NULL;

        list_for_each_entry(fmr, fmr_list, list) {
                if (mdev && to_mdev(fmr->device) != mdev)
                        return -EINVAL;
                mdev = to_mdev(fmr->device);
        }

        if (!mdev)
                return 0;

        if (mthca_is_memfree(mdev)) {
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));

                wmb();
        } else
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

        err = mthca_SYNC_TPT(mdev, &status);
        if (err)
                return err;
        if (status)
                return -EINVAL;
        return 0;
}

static ssize_t show_rev(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        return sprintf(buf, "%x\n", dev->rev_id);
}

static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32),
                       (int) (dev->fw_ver >> 16) & 0xffff,
                       (int) dev->fw_ver & 0xffff);
}

static ssize_t show_hca(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        switch (dev->pdev->device) {
        case PCI_DEVICE_ID_MELLANOX_TAVOR:
                return sprintf(buf, "MT23108\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
                return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL:
                return sprintf(buf, "MT25208\n");
        case PCI_DEVICE_ID_MELLANOX_SINAI:
        case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
                return sprintf(buf, "MT25204\n");
        default:
                return sprintf(buf, "unknown\n");
        }
}

static ssize_t show_board(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}

static CLASS_DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static CLASS_DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);

static struct class_device_attribute *mthca_class_attributes[] = {
        &class_device_attr_hw_rev,
        &class_device_attr_fw_ver,
        &class_device_attr_hca_type,
        &class_device_attr_board_id
};

static int mthca_init_node_data(struct mthca_dev *dev)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

        err = mthca_MAD_IFC(dev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mthca_MAD_IFC(dev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

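/*
 * Fill in the ib_device verbs entry points and userspace command mask,
 * register with the IB core and create the sysfs attributes.  Posting
 * of work requests and CQ arming use different functions on Tavor- and
 * Arbel- (mem-free) style HCAs.
 */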
int mthca_register_device(struct mthca_dev *dev)
{
        int ret;
        int i;

        ret = mthca_init_node_data(dev);
        if (ret)
                return ret;

        strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
        dev->ib_dev.owner = THIS_MODULE;

        dev->ib_dev.uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION;
        dev->ib_dev.uverbs_cmd_mask =
                (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
                (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
                (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
                (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
                (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
                (1ull << IB_USER_VERBS_CMD_REG_MR)              |
                (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
                (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
                (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
                (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
                (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
                (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
                (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
                (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
                (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
        dev->ib_dev.node_type = IB_NODE_CA;
        dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
        dev->ib_dev.dma_device = &dev->pdev->dev;
        dev->ib_dev.class_dev.dev = &dev->pdev->dev;
        dev->ib_dev.query_device = mthca_query_device;
        dev->ib_dev.query_port = mthca_query_port;
        dev->ib_dev.modify_device = mthca_modify_device;
        dev->ib_dev.modify_port = mthca_modify_port;
        dev->ib_dev.query_pkey = mthca_query_pkey;
        dev->ib_dev.query_gid = mthca_query_gid;
        dev->ib_dev.alloc_ucontext = mthca_alloc_ucontext;
        dev->ib_dev.dealloc_ucontext = mthca_dealloc_ucontext;
        dev->ib_dev.mmap = mthca_mmap_uar;
        dev->ib_dev.alloc_pd = mthca_alloc_pd;
        dev->ib_dev.dealloc_pd = mthca_dealloc_pd;
        dev->ib_dev.create_ah = mthca_ah_create;
        dev->ib_dev.query_ah = mthca_ah_query;
        dev->ib_dev.destroy_ah = mthca_ah_destroy;

        if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
                dev->ib_dev.create_srq = mthca_create_srq;
                dev->ib_dev.modify_srq = mthca_modify_srq;
                dev->ib_dev.query_srq = mthca_query_srq;
                dev->ib_dev.destroy_srq = mthca_destroy_srq;

                if (mthca_is_memfree(dev))
                        dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
                else
                        dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
        }

        dev->ib_dev.create_qp = mthca_create_qp;
        dev->ib_dev.modify_qp = mthca_modify_qp;
        dev->ib_dev.query_qp = mthca_query_qp;
        dev->ib_dev.destroy_qp = mthca_destroy_qp;
        dev->ib_dev.create_cq = mthca_create_cq;
        dev->ib_dev.resize_cq = mthca_resize_cq;
        dev->ib_dev.destroy_cq = mthca_destroy_cq;
        dev->ib_dev.poll_cq = mthca_poll_cq;
        dev->ib_dev.get_dma_mr = mthca_get_dma_mr;
        dev->ib_dev.reg_phys_mr = mthca_reg_phys_mr;
        dev->ib_dev.reg_user_mr = mthca_reg_user_mr;
        dev->ib_dev.dereg_mr = mthca_dereg_mr;

        if (dev->mthca_flags & MTHCA_FLAG_FMR) {
                dev->ib_dev.alloc_fmr = mthca_alloc_fmr;
                dev->ib_dev.unmap_fmr = mthca_unmap_fmr;
                dev->ib_dev.dealloc_fmr = mthca_dealloc_fmr;
                if (mthca_is_memfree(dev))
                        dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
                else
                        dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
        }

        dev->ib_dev.attach_mcast = mthca_multicast_attach;
        dev->ib_dev.detach_mcast = mthca_multicast_detach;
        dev->ib_dev.process_mad = mthca_process_mad;

        if (mthca_is_memfree(dev)) {
                dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
                dev->ib_dev.post_send = mthca_arbel_post_send;
                dev->ib_dev.post_recv = mthca_arbel_post_receive;
        } else {
                dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
                dev->ib_dev.post_send = mthca_tavor_post_send;
                dev->ib_dev.post_recv = mthca_tavor_post_receive;
        }

        mutex_init(&dev->cap_mask_mutex);

        ret = ib_register_device(&dev->ib_dev);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(mthca_class_attributes); ++i) {
                ret = class_device_create_file(&dev->ib_dev.class_dev,
                                               mthca_class_attributes[i]);
                if (ret) {
                        ib_unregister_device(&dev->ib_dev);
                        return ret;
                }
        }

        mthca_start_catas_poll(dev);

        return 0;
}

void mthca_unregister_device(struct mthca_dev *dev)
{
        mthca_stop_catas_poll(dev);
        ib_unregister_device(&dev->ib_dev);
}