IB/mthca: Don't allow userspace open while recovering from catastrophic error
drivers/infiniband/hw/mthca/mthca_provider.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include <linux/sched.h>
#include <linux/mm.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_user.h"
#include "mthca_memfree.h"

static void init_query_mad(struct ib_smp *mad)
{
        mad->base_version = 1;
        mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        mad->class_version = 1;
        mad->method = IB_MGMT_METHOD_GET;
}

static int mthca_query_device(struct ib_device *ibdev,
                              struct ib_device_attr *props)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        struct mthca_dev *mdev = to_mdev(ibdev);

        u8 status;

        in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof *props);

        props->fw_ver = mdev->fw_ver;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mthca_MAD_IFC(mdev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        props->device_cap_flags = mdev->device_cap_flags;
        props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
                0xffffff;
        props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
        props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

        props->max_mr_size = ~0ull;
        props->page_size_cap = mdev->limits.page_size_cap;
        props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps;
        props->max_qp_wr = mdev->limits.max_wqes;
        props->max_sge = mdev->limits.max_sg;
        props->max_cq = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
        props->max_cqe = mdev->limits.max_cqes;
        props->max_mr = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
        props->max_pd = mdev->limits.num_pds - mdev->limits.reserved_pds;
        props->max_qp_rd_atom = 1 << mdev->qp_table.rdb_shift;
        props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
        props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
        props->max_srq = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
        props->max_srq_wr = mdev->limits.max_srq_wqes;
        props->max_srq_sge = mdev->limits.max_srq_sge;
        props->local_ca_ack_delay = mdev->limits.local_ca_ack_delay;
        props->atomic_cap = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
                                IB_ATOMIC_HCA : IB_ATOMIC_NONE;
        props->max_pkeys = mdev->limits.pkey_table_len;
        props->max_mcast_grp = mdev->limits.num_mgms + mdev->limits.num_amgms;
        props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        /*
         * If Sinai memory key optimization is being used, then only
         * the 8-bit key portion will change.  For other HCAs, the
         * unused index bits will also be used for FMR remapping.
         */
        if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
                props->max_map_per_fmr = 255;
        else
                props->max_map_per_fmr =
                        (1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;

        err = 0;
out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mthca_query_port(struct ib_device *ibdev,
                            u8 port, struct ib_port_attr *props)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof *props);

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
        props->lmc = out_mad->data[34] & 0x7;
        props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
        props->sm_sl = out_mad->data[36] & 0xf;
        props->state = out_mad->data[32] & 0xf;
        props->phys_state = out_mad->data[33] >> 4;
        props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
        props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len;
        props->max_msg_sz = 0x80000000;
        props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len;
        props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
        props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
        props->active_width = out_mad->data[31] & 0xf;
        props->active_speed = out_mad->data[35] >> 4;
        props->max_mtu = out_mad->data[41] & 0xf;
        props->active_mtu = out_mad->data[36] >> 4;
        props->subnet_timeout = out_mad->data[51] & 0x1f;
        props->max_vl_num = out_mad->data[37] >> 4;
        props->init_type_reply = out_mad->data[41] >> 4;

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mthca_modify_device(struct ib_device *ibdev,
                               int mask,
                               struct ib_device_modify *props)
{
        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                return -EOPNOTSUPP;

        if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
                if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
                        return -ERESTARTSYS;
                memcpy(ibdev->node_desc, props->node_desc, 64);
                mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
        }

        return 0;
}

static int mthca_modify_port(struct ib_device *ibdev,
                             u8 port, int port_modify_mask,
                             struct ib_port_modify *props)
{
        struct mthca_set_ib_param set_ib;
        struct ib_port_attr attr;
        int err;
        u8 status;

        if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
                return -ERESTARTSYS;

        err = mthca_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        set_ib.set_si_guid = 0;
        set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

        set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

out:
        mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
        return err;
}

static int mthca_query_pkey(struct ib_device *ibdev,
                            u8 port, u16 index, u16 *pkey)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod = cpu_to_be32(index / 32);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mthca_query_gid(struct ib_device *ibdev, u8 port,
                           int index, union ib_gid *gid)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        memcpy(gid->raw, out_mad->data + 8, 8);

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod = cpu_to_be32(index / 8);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
                                                struct ib_udata *udata)
{
        struct mthca_alloc_ucontext_resp uresp;
        struct mthca_ucontext *context;
        int err;

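        /*
         * Refuse to open a new userspace context while the device is
         * marked inactive, e.g. while it is recovering from a
         * catastrophic error.
         */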
        if (!(to_mdev(ibdev)->active))
                return ERR_PTR(-EAGAIN);

        memset(&uresp, 0, sizeof uresp);

        uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
        if (mthca_is_memfree(to_mdev(ibdev)))
                uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
        else
                uresp.uarc_size = 0;

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
        if (err) {
                kfree(context);
                return ERR_PTR(err);
        }

        context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
        if (IS_ERR(context->db_tab)) {
                err = PTR_ERR(context->db_tab);
                mthca_uar_free(to_mdev(ibdev), &context->uar);
                kfree(context);
                return ERR_PTR(err);
        }

        if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
                mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
                mthca_uar_free(to_mdev(ibdev), &context->uar);
                kfree(context);
                return ERR_PTR(-EFAULT);
        }

        context->reg_mr_warned = 0;

        return &context->ibucontext;
}

static int mthca_dealloc_ucontext(struct ib_ucontext *context)
{
        mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
                                  to_mucontext(context)->db_tab);
        mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
        kfree(to_mucontext(context));

        return 0;
}

static int mthca_mmap_uar(struct ib_ucontext *context,
                          struct vm_area_struct *vma)
{
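        /* Map the context's UAR page (doorbell registers) uncached into userspace. */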
        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (io_remap_pfn_range(vma, vma->vm_start,
                               to_mucontext(context)->uar.pfn,
                               PAGE_SIZE, vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}

static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
                                    struct ib_ucontext *context,
                                    struct ib_udata *udata)
{
        struct mthca_pd *pd;
        int err;

        pd = kmalloc(sizeof *pd, GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
        if (err) {
                kfree(pd);
                return ERR_PTR(err);
        }

        if (context) {
                if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
                        mthca_pd_free(to_mdev(ibdev), pd);
                        kfree(pd);
                        return ERR_PTR(-EFAULT);
                }
        }

        return &pd->ibpd;
}

static int mthca_dealloc_pd(struct ib_pd *pd)
{
        mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
        kfree(pd);

        return 0;
}

static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
                                     struct ib_ah_attr *ah_attr)
{
        int err;
        struct mthca_ah *ah;

        ah = kmalloc(sizeof *ah, GFP_ATOMIC);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
        if (err) {
                kfree(ah);
                return ERR_PTR(err);
        }

        return &ah->ibah;
}

static int mthca_ah_destroy(struct ib_ah *ah)
{
        mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
        kfree(ah);

        return 0;
}

static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
                                       struct ib_srq_init_attr *init_attr,
                                       struct ib_udata *udata)
{
        struct mthca_create_srq ucmd;
        struct mthca_ucontext *context = NULL;
        struct mthca_srq *srq;
        int err;

        srq = kmalloc(sizeof *srq, GFP_KERNEL);
        if (!srq)
                return ERR_PTR(-ENOMEM);

        if (pd->uobject) {
                context = to_mucontext(pd->uobject->context);

                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        err = -EFAULT;
                        goto err_free;
                }

                err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                        context->db_tab, ucmd.db_index,
                                        ucmd.db_page);

                if (err)
                        goto err_free;

                srq->mr.ibmr.lkey = ucmd.lkey;
                srq->db_index = ucmd.db_index;
        }

        err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
                              &init_attr->attr, srq);

        if (err && pd->uobject)
                mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
                                    context->db_tab, ucmd.db_index);

        if (err)
                goto err_free;

        if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
                mthca_free_srq(to_mdev(pd->device), srq);
                err = -EFAULT;
                goto err_free;
        }

        return &srq->ibsrq;

err_free:
        kfree(srq);

        return ERR_PTR(err);
}

static int mthca_destroy_srq(struct ib_srq *srq)
{
        struct mthca_ucontext *context;

        if (srq->uobject) {
                context = to_mucontext(srq->uobject->context);

                mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
                                    context->db_tab, to_msrq(srq)->db_index);
        }

        mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
        kfree(srq);

        return 0;
}

static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
                                     struct ib_qp_init_attr *init_attr,
                                     struct ib_udata *udata)
{
        struct mthca_create_qp ucmd;
        struct mthca_qp *qp;
        int err;

        if (init_attr->create_flags)
                return ERR_PTR(-EINVAL);

        switch (init_attr->qp_type) {
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
        {
                struct mthca_ucontext *context;

                qp = kmalloc(sizeof *qp, GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                if (pd->uobject) {
                        context = to_mucontext(pd->uobject->context);

                        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                                kfree(qp);
                                return ERR_PTR(-EFAULT);
                        }

                        err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                                context->db_tab,
                                                ucmd.sq_db_index, ucmd.sq_db_page);
                        if (err) {
                                kfree(qp);
                                return ERR_PTR(err);
                        }

                        err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                                context->db_tab,
                                                ucmd.rq_db_index, ucmd.rq_db_page);
                        if (err) {
                                mthca_unmap_user_db(to_mdev(pd->device),
                                                    &context->uar,
                                                    context->db_tab,
                                                    ucmd.sq_db_index);
                                kfree(qp);
                                return ERR_PTR(err);
                        }

                        qp->mr.ibmr.lkey = ucmd.lkey;
                        qp->sq.db_index = ucmd.sq_db_index;
                        qp->rq.db_index = ucmd.rq_db_index;
                }

                err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
                                     to_mcq(init_attr->send_cq),
                                     to_mcq(init_attr->recv_cq),
                                     init_attr->qp_type, init_attr->sq_sig_type,
                                     &init_attr->cap, qp);

                if (err && pd->uobject) {
                        context = to_mucontext(pd->uobject->context);

                        mthca_unmap_user_db(to_mdev(pd->device),
                                            &context->uar,
                                            context->db_tab,
                                            ucmd.sq_db_index);
                        mthca_unmap_user_db(to_mdev(pd->device),
                                            &context->uar,
                                            context->db_tab,
                                            ucmd.rq_db_index);
                }

                qp->ibqp.qp_num = qp->qpn;
                break;
        }
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        {
                /* Don't allow userspace to create special QPs */
                if (pd->uobject)
                        return ERR_PTR(-EINVAL);

                qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

                err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
                                      to_mcq(init_attr->send_cq),
                                      to_mcq(init_attr->recv_cq),
                                      init_attr->sq_sig_type, &init_attr->cap,
                                      qp->ibqp.qp_num, init_attr->port_num,
                                      to_msqp(qp));
                break;
        }
        default:
                /* Don't support raw QPs */
                return ERR_PTR(-ENOSYS);
        }

        if (err) {
                kfree(qp);
                return ERR_PTR(err);
        }

        init_attr->cap.max_send_wr = qp->sq.max;
        init_attr->cap.max_recv_wr = qp->rq.max;
        init_attr->cap.max_send_sge = qp->sq.max_gs;
        init_attr->cap.max_recv_sge = qp->rq.max_gs;
        init_attr->cap.max_inline_data = qp->max_inline_data;

        return &qp->ibqp;
}

static int mthca_destroy_qp(struct ib_qp *qp)
{
        if (qp->uobject) {
                mthca_unmap_user_db(to_mdev(qp->device),
                                    &to_mucontext(qp->uobject->context)->uar,
                                    to_mucontext(qp->uobject->context)->db_tab,
                                    to_mqp(qp)->sq.db_index);
                mthca_unmap_user_db(to_mdev(qp->device),
                                    &to_mucontext(qp->uobject->context)->uar,
                                    to_mucontext(qp->uobject->context)->db_tab,
                                    to_mqp(qp)->rq.db_index);
        }
        mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
        kfree(qp);
        return 0;
}

static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
                                     int comp_vector,
                                     struct ib_ucontext *context,
                                     struct ib_udata *udata)
{
        struct mthca_create_cq ucmd;
        struct mthca_cq *cq;
        int nent;
        int err;

        if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
                return ERR_PTR(-EINVAL);

        if (context) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                        return ERR_PTR(-EFAULT);

                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                        to_mucontext(context)->db_tab,
                                        ucmd.set_db_index, ucmd.set_db_page);
                if (err)
                        return ERR_PTR(err);

                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                        to_mucontext(context)->db_tab,
                                        ucmd.arm_db_index, ucmd.arm_db_page);
                if (err)
                        goto err_unmap_set;
        }

        cq = kmalloc(sizeof *cq, GFP_KERNEL);
        if (!cq) {
                err = -ENOMEM;
                goto err_unmap_arm;
        }

        if (context) {
                cq->buf.mr.ibmr.lkey = ucmd.lkey;
                cq->set_ci_db_index = ucmd.set_db_index;
                cq->arm_db_index = ucmd.arm_db_index;
        }

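        /* CQ sizes must be a power of two; find the smallest power of two larger than entries. */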
        for (nent = 1; nent <= entries; nent <<= 1)
                ; /* nothing */

        err = mthca_init_cq(to_mdev(ibdev), nent,
                            context ? to_mucontext(context) : NULL,
                            context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
                            cq);
        if (err)
                goto err_free;

        if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
                mthca_free_cq(to_mdev(ibdev), cq);
                err = -EFAULT;
                goto err_free;
        }

        cq->resize_buf = NULL;

        return &cq->ibcq;

err_free:
        kfree(cq);

err_unmap_arm:
        if (context)
                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                    to_mucontext(context)->db_tab, ucmd.arm_db_index);

err_unmap_set:
        if (context)
                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                    to_mucontext(context)->db_tab, ucmd.set_db_index);

        return ERR_PTR(err);
}

static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
                                  int entries)
{
        int ret;

        spin_lock_irq(&cq->lock);
        if (cq->resize_buf) {
                ret = -EBUSY;
                goto unlock;
        }

        cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
        if (!cq->resize_buf) {
                ret = -ENOMEM;
                goto unlock;
        }

        cq->resize_buf->state = CQ_RESIZE_ALLOC;

        ret = 0;

unlock:
        spin_unlock_irq(&cq->lock);

        if (ret)
                return ret;

        ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
        if (ret) {
                spin_lock_irq(&cq->lock);
                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                spin_unlock_irq(&cq->lock);
                return ret;
        }

        cq->resize_buf->cqe = entries - 1;

        spin_lock_irq(&cq->lock);
        cq->resize_buf->state = CQ_RESIZE_READY;
        spin_unlock_irq(&cq->lock);

        return 0;
}

static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(ibcq->device);
        struct mthca_cq *cq = to_mcq(ibcq);
        struct mthca_resize_cq ucmd;
        u32 lkey;
        u8 status;
        int ret;

        if (entries < 1 || entries > dev->limits.max_cqes)
                return -EINVAL;

        mutex_lock(&cq->mutex);

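        /* Hardware CQ sizes are powers of two; one entry is reserved, so ibcq->cqe reports entries - 1. */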
        entries = roundup_pow_of_two(entries + 1);
        if (entries == ibcq->cqe + 1) {
                ret = 0;
                goto out;
        }

        if (cq->is_kernel) {
                ret = mthca_alloc_resize_buf(dev, cq, entries);
                if (ret)
                        goto out;
                lkey = cq->resize_buf->buf.mr.ibmr.lkey;
        } else {
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        ret = -EFAULT;
                        goto out;
                }
                lkey = ucmd.lkey;
        }

        ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries), &status);
        if (status)
                ret = -EINVAL;

        if (ret) {
                if (cq->resize_buf) {
                        mthca_free_cq_buf(dev, &cq->resize_buf->buf,
                                          cq->resize_buf->cqe);
                        kfree(cq->resize_buf);
                        spin_lock_irq(&cq->lock);
                        cq->resize_buf = NULL;
                        spin_unlock_irq(&cq->lock);
                }
                goto out;
        }

        if (cq->is_kernel) {
                struct mthca_cq_buf tbuf;
                int tcqe;

                spin_lock_irq(&cq->lock);
                if (cq->resize_buf->state == CQ_RESIZE_READY) {
                        mthca_cq_resize_copy_cqes(cq);
                        tbuf = cq->buf;
                        tcqe = cq->ibcq.cqe;
                        cq->buf = cq->resize_buf->buf;
                        cq->ibcq.cqe = cq->resize_buf->cqe;
                } else {
                        tbuf = cq->resize_buf->buf;
                        tcqe = cq->resize_buf->cqe;
                }

                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                spin_unlock_irq(&cq->lock);

                mthca_free_cq_buf(dev, &tbuf, tcqe);
        } else
                ibcq->cqe = entries - 1;

out:
        mutex_unlock(&cq->mutex);

        return ret;
}

static int mthca_destroy_cq(struct ib_cq *cq)
{
        if (cq->uobject) {
                mthca_unmap_user_db(to_mdev(cq->device),
                                    &to_mucontext(cq->uobject->context)->uar,
                                    to_mucontext(cq->uobject->context)->db_tab,
                                    to_mcq(cq)->arm_db_index);
                mthca_unmap_user_db(to_mdev(cq->device),
                                    &to_mucontext(cq->uobject->context)->uar,
                                    to_mucontext(cq->uobject->context)->db_tab,
                                    to_mcq(cq)->set_ci_db_index);
        }
        mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
        kfree(cq);

        return 0;
}

static inline u32 convert_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC : 0) |
               (acc & IB_ACCESS_REMOTE_WRITE ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ ? MTHCA_MPT_FLAG_REMOTE_READ : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0) |
               MTHCA_MPT_FLAG_LOCAL_READ;
}

static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mthca_mr *mr;
        int err;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mthca_mr_alloc_notrans(to_mdev(pd->device),
                                     to_mpd(pd)->pd_num,
                                     convert_access(acc), mr);

        if (err) {
                kfree(mr);
                return ERR_PTR(err);
        }

        mr->umem = NULL;

        return &mr->ibmr;
}

static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
                                       struct ib_phys_buf *buffer_list,
                                       int num_phys_buf,
                                       int acc,
                                       u64 *iova_start)
{
        struct mthca_mr *mr;
        u64 *page_list;
        u64 total_size;
        unsigned long mask;
        int shift;
        int npages;
        int err;
        int i, j, n;

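        /*
         * Find the largest page size (shift) compatible with the alignment
         * of all buffer addresses, the interior buffer boundaries and the
         * requested iova.
         */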
        mask = buffer_list[0].addr ^ *iova_start;
        total_size = 0;
        for (i = 0; i < num_phys_buf; ++i) {
                if (i != 0)
                        mask |= buffer_list[i].addr;
                if (i != num_phys_buf - 1)
                        mask |= buffer_list[i].addr + buffer_list[i].size;

                total_size += buffer_list[i].size;
        }

        if (mask & ~PAGE_MASK)
                return ERR_PTR(-EINVAL);

        shift = __ffs(mask | 1 << 31);

        buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
        buffer_list[0].addr &= ~0ull << shift;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        npages = 0;
        for (i = 0; i < num_phys_buf; ++i)
                npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;

        if (!npages)
                return &mr->ibmr;

        page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
        if (!page_list) {
                kfree(mr);
                return ERR_PTR(-ENOMEM);
        }

        n = 0;
        for (i = 0; i < num_phys_buf; ++i)
                for (j = 0;
                     j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
                     ++j)
                        page_list[n++] = buffer_list[i].addr + ((u64) j << shift);

        mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) "
                  "in PD %x; shift %d, npages %d.\n",
                  (unsigned long long) buffer_list[0].addr,
                  (unsigned long long) *iova_start,
                  to_mpd(pd)->pd_num,
                  shift, npages);

        err = mthca_mr_alloc_phys(to_mdev(pd->device),
                                  to_mpd(pd)->pd_num,
                                  page_list, shift, npages,
                                  *iova_start, total_size,
                                  convert_access(acc), mr);

        if (err) {
                kfree(page_list);
                kfree(mr);
                return ERR_PTR(err);
        }

        kfree(page_list);
        mr->umem = NULL;

        return &mr->ibmr;
}

static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                       u64 virt, int acc, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(pd->device);
        struct ib_umem_chunk *chunk;
        struct mthca_mr *mr;
        struct mthca_reg_mr ucmd;
        u64 *pages;
        int shift, n, len;
        int i, j, k;
        int err = 0;
        int write_mtt_size;

        if (udata->inlen - sizeof (struct ib_uverbs_cmd_hdr) < sizeof ucmd) {
                if (!to_mucontext(pd->uobject->context)->reg_mr_warned) {
                        mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
                                   current->comm);
                        mthca_warn(dev, "  Update libmthca to fix this.\n");
                }
                ++to_mucontext(pd->uobject->context)->reg_mr_warned;
                ucmd.mr_attrs = 0;
        } else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                return ERR_PTR(-EFAULT);

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
                               ucmd.mr_attrs & MTHCA_MR_DMASYNC);

        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                goto err;
        }

        shift = ffs(mr->umem->page_size) - 1;

        n = 0;
        list_for_each_entry(chunk, &mr->umem->chunk_list, list)
                n += chunk->nents;

        mr->mtt = mthca_alloc_mtt(dev, n);
        if (IS_ERR(mr->mtt)) {
                err = PTR_ERR(mr->mtt);
                goto err_umem;
        }

        pages = (u64 *) __get_free_page(GFP_KERNEL);
        if (!pages) {
                err = -ENOMEM;
                goto err_mtt;
        }

        i = n = 0;

        write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));

        list_for_each_entry(chunk, &mr->umem->chunk_list, list)
                for (j = 0; j < chunk->nmap; ++j) {
                        len = sg_dma_len(&chunk->page_list[j]) >> shift;
                        for (k = 0; k < len; ++k) {
                                pages[i++] = sg_dma_address(&chunk->page_list[j]) +
                                        mr->umem->page_size * k;
                                /*
                                 * Be friendly to write_mtt and pass it chunks
                                 * of appropriate size.
                                 */
                                if (i == write_mtt_size) {
                                        err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
                                        if (err)
                                                goto mtt_done;
                                        n += i;
                                        i = 0;
                                }
                        }
                }

        if (i)
                err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
mtt_done:
        free_page((unsigned long) pages);
        if (err)
                goto err_mtt;

        err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length,
                             convert_access(acc), mr);

        if (err)
                goto err_mtt;

        return &mr->ibmr;

err_mtt:
        mthca_free_mtt(dev, mr->mtt);

err_umem:
        ib_umem_release(mr->umem);

err:
        kfree(mr);
        return ERR_PTR(err);
}

static int mthca_dereg_mr(struct ib_mr *mr)
{
        struct mthca_mr *mmr = to_mmr(mr);

        mthca_free_mr(to_mdev(mr->device), mmr);
        if (mmr->umem)
                ib_umem_release(mmr->umem);
        kfree(mmr);

        return 0;
}

static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                                      struct ib_fmr_attr *fmr_attr)
{
        struct mthca_fmr *fmr;
        int err;

        fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
        if (!fmr)
                return ERR_PTR(-ENOMEM);

        memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
        err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
                              convert_access(mr_access_flags), fmr);

        if (err) {
                kfree(fmr);
                return ERR_PTR(err);
        }

        return &fmr->ibmr;
}

static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
        struct mthca_fmr *mfmr = to_mfmr(fmr);
        int err;

        err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
        if (err)
                return err;

        kfree(mfmr);
        return 0;
}

static int mthca_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *fmr;
        int err;
        u8 status;
        struct mthca_dev *mdev = NULL;

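        /* All FMRs in the list must belong to the same device. */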
        list_for_each_entry(fmr, fmr_list, list) {
                if (mdev && to_mdev(fmr->device) != mdev)
                        return -EINVAL;
                mdev = to_mdev(fmr->device);
        }

        if (!mdev)
                return 0;

        if (mthca_is_memfree(mdev)) {
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));

                wmb();
        } else
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

        err = mthca_SYNC_TPT(mdev, &status);
        if (err)
                return err;
        if (status)
                return -EINVAL;
        return 0;
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct mthca_dev *dev =
                container_of(device, struct mthca_dev, ib_dev.dev);
        return sprintf(buf, "%x\n", dev->rev_id);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
                           char *buf)
{
        struct mthca_dev *dev =
                container_of(device, struct mthca_dev, ib_dev.dev);
        return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32),
                       (int) (dev->fw_ver >> 16) & 0xffff,
                       (int) dev->fw_ver & 0xffff);
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct mthca_dev *dev =
                container_of(device, struct mthca_dev, ib_dev.dev);
        switch (dev->pdev->device) {
        case PCI_DEVICE_ID_MELLANOX_TAVOR:
                return sprintf(buf, "MT23108\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
                return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL:
                return sprintf(buf, "MT25208\n");
        case PCI_DEVICE_ID_MELLANOX_SINAI:
        case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
                return sprintf(buf, "MT25204\n");
        default:
                return sprintf(buf, "unknown\n");
        }
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
                          char *buf)
{
        struct mthca_dev *dev =
                container_of(device, struct mthca_dev, ib_dev.dev);
        return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}

static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);

static struct device_attribute *mthca_dev_attributes[] = {
        &dev_attr_hw_rev,
        &dev_attr_fw_ver,
        &dev_attr_hca_type,
        &dev_attr_board_id
};

static int mthca_init_node_data(struct mthca_dev *dev)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

        err = mthca_MAD_IFC(dev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mthca_MAD_IFC(dev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        if (mthca_is_memfree(dev))
                dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

int mthca_register_device(struct mthca_dev *dev)
{
        int ret;
        int i;

        ret = mthca_init_node_data(dev);
        if (ret)
                return ret;

        strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
        dev->ib_dev.owner = THIS_MODULE;

        dev->ib_dev.uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION;
        dev->ib_dev.uverbs_cmd_mask =
                (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
                (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
                (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
                (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
                (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
                (1ull << IB_USER_VERBS_CMD_REG_MR) |
                (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
                (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
                (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
                (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
                (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
                (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
                (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
                (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
                (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
        dev->ib_dev.node_type = RDMA_NODE_IB_CA;
        dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
        dev->ib_dev.num_comp_vectors = 1;
        dev->ib_dev.dma_device = &dev->pdev->dev;
        dev->ib_dev.query_device = mthca_query_device;
        dev->ib_dev.query_port = mthca_query_port;
        dev->ib_dev.modify_device = mthca_modify_device;
        dev->ib_dev.modify_port = mthca_modify_port;
        dev->ib_dev.query_pkey = mthca_query_pkey;
        dev->ib_dev.query_gid = mthca_query_gid;
        dev->ib_dev.alloc_ucontext = mthca_alloc_ucontext;
        dev->ib_dev.dealloc_ucontext = mthca_dealloc_ucontext;
        dev->ib_dev.mmap = mthca_mmap_uar;
        dev->ib_dev.alloc_pd = mthca_alloc_pd;
        dev->ib_dev.dealloc_pd = mthca_dealloc_pd;
        dev->ib_dev.create_ah = mthca_ah_create;
        dev->ib_dev.query_ah = mthca_ah_query;
        dev->ib_dev.destroy_ah = mthca_ah_destroy;

        if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
                dev->ib_dev.create_srq = mthca_create_srq;
                dev->ib_dev.modify_srq = mthca_modify_srq;
                dev->ib_dev.query_srq = mthca_query_srq;
                dev->ib_dev.destroy_srq = mthca_destroy_srq;
                dev->ib_dev.uverbs_cmd_mask |=
                        (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
                        (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
                        (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
                        (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);

                if (mthca_is_memfree(dev))
                        dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
                else
                        dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
        }

        dev->ib_dev.create_qp = mthca_create_qp;
        dev->ib_dev.modify_qp = mthca_modify_qp;
        dev->ib_dev.query_qp = mthca_query_qp;
        dev->ib_dev.destroy_qp = mthca_destroy_qp;
        dev->ib_dev.create_cq = mthca_create_cq;
        dev->ib_dev.resize_cq = mthca_resize_cq;
        dev->ib_dev.destroy_cq = mthca_destroy_cq;
        dev->ib_dev.poll_cq = mthca_poll_cq;
        dev->ib_dev.get_dma_mr = mthca_get_dma_mr;
        dev->ib_dev.reg_phys_mr = mthca_reg_phys_mr;
        dev->ib_dev.reg_user_mr = mthca_reg_user_mr;
        dev->ib_dev.dereg_mr = mthca_dereg_mr;

        if (dev->mthca_flags & MTHCA_FLAG_FMR) {
                dev->ib_dev.alloc_fmr = mthca_alloc_fmr;
                dev->ib_dev.unmap_fmr = mthca_unmap_fmr;
                dev->ib_dev.dealloc_fmr = mthca_dealloc_fmr;
                if (mthca_is_memfree(dev))
                        dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
                else
                        dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
        }

        dev->ib_dev.attach_mcast = mthca_multicast_attach;
        dev->ib_dev.detach_mcast = mthca_multicast_detach;
        dev->ib_dev.process_mad = mthca_process_mad;

        if (mthca_is_memfree(dev)) {
                dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
                dev->ib_dev.post_send = mthca_arbel_post_send;
                dev->ib_dev.post_recv = mthca_arbel_post_receive;
        } else {
                dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
                dev->ib_dev.post_send = mthca_tavor_post_send;
                dev->ib_dev.post_recv = mthca_tavor_post_receive;
        }

        mutex_init(&dev->cap_mask_mutex);

        ret = ib_register_device(&dev->ib_dev);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(mthca_dev_attributes); ++i) {
                ret = device_create_file(&dev->ib_dev.dev,
                                         mthca_dev_attributes[i]);
                if (ret) {
                        ib_unregister_device(&dev->ib_dev);
                        return ret;
                }
        }

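        /* Begin polling for catastrophic errors so the driver can recover the device if one occurs. */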
        mthca_start_catas_poll(dev);

        return 0;
}

void mthca_unregister_device(struct mthca_dev *dev)
{
        mthca_stop_catas_poll(dev);
        ib_unregister_device(&dev->ib_dev);
}