/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/export.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include <rdma/mthca-abi.h>
#include "mthca_memfree.h"
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method        = IB_MGMT_METHOD_GET;
}
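
/*
 * Note on the query implementations below: mthca has no dedicated
 * firmware command for most device/port attributes, so the driver
 * queries its own SMA by building a subnet management MAD, executing
 * it with mthca_MAD_IFC(), and decoding the response buffer at the
 * byte offsets defined by the IB specification.
 */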
static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			      struct ib_udata *uhw)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mthca_dev *mdev = to_mdev(ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = mdev->fw_ver;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(mdev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	props->device_cap_flags    = mdev->device_cap_flags;
	props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id      = be16_to_cpup((__be16 *) (out_mad->data + 30));
	props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size         = ~0ull;
	props->page_size_cap       = mdev->limits.page_size_cap;
	props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
	props->max_qp_wr           = mdev->limits.max_wqes;
	props->max_send_sge        = mdev->limits.max_sg;
	props->max_recv_sge        = mdev->limits.max_sg;
	props->max_sge_rd          = mdev->limits.max_sg;
	props->max_cq              = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
	props->max_cqe             = mdev->limits.max_cqes;
	props->max_mr              = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
	props->max_pd              = mdev->limits.num_pds - mdev->limits.reserved_pds;
	props->max_qp_rd_atom      = 1 << mdev->qp_table.rdb_shift;
	props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
	props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
	props->max_srq             = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
	props->max_srq_wr          = mdev->limits.max_srq_wqes;
	props->max_srq_sge         = mdev->limits.max_srq_sge;
	props->local_ca_ack_delay  = mdev->limits.local_ca_ack_delay;
	props->atomic_cap          = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
					IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->max_pkeys           = mdev->limits.pkey_table_len;
	props->max_mcast_grp       = mdev->limits.num_mgms + mdev->limits.num_amgms;
	props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	/*
	 * If Sinai memory key optimization is being used, then only
	 * the 8-bit key portion will change.  For other HCAs, the
	 * unused index bits will also be used for FMR remapping.
	 */
	if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		props->max_map_per_fmr = 255;
	else
		props->max_map_per_fmr =
			(1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;

	err = 0;
 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
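
/* Decode a PortInfo MAD response into the generic ib_port_attr fields. */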
static int mthca_query_port(struct ib_device *ibdev,
			    u8 port, struct ib_port_attr *props)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	/* props being zeroed by the caller, avoid zeroing it here */

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	props->lid               = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc               = out_mad->data[34] & 0x7;
	props->sm_lid            = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl             = out_mad->data[36] & 0xf;
	props->state             = out_mad->data[32] & 0xf;
	props->phys_state        = out_mad->data[33] >> 4;
	props->port_cap_flags    = be32_to_cpup((__be32 *) (out_mad->data + 20));
	props->gid_tbl_len       = to_mdev(ibdev)->limits.gid_table_len;
	props->max_msg_sz        = 0x80000000;
	props->pkey_tbl_len      = to_mdev(ibdev)->limits.pkey_table_len;
	props->bad_pkey_cntr     = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width      = out_mad->data[31] & 0xf;
	props->active_speed      = out_mad->data[35] >> 4;
	props->max_mtu           = out_mad->data[41] & 0xf;
	props->active_mtu        = out_mad->data[36] >> 4;
	props->subnet_timeout    = out_mad->data[51] & 0x1f;
	props->max_vl_num        = out_mad->data[37] >> 4;
	props->init_type_reply   = out_mad->data[41] >> 4;

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static int mthca_modify_device(struct ib_device *ibdev,
			       int mask,
			       struct ib_device_modify *props)
{
	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
			return -ERESTARTSYS;
		memcpy(ibdev->node_desc, props->node_desc,
		       IB_DEVICE_NODE_DESC_MAX);
		mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	}

	return 0;
}
static int mthca_modify_port(struct ib_device *ibdev,
			     u8 port, int port_modify_mask,
			     struct ib_port_modify *props)
{
	struct mthca_set_ib_param set_ib;
	struct ib_port_attr attr;
	int err;

	if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
		return -ERESTARTSYS;

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	set_ib.set_si_guid     = 0;
	set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

	set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port);
	if (err)
		goto out;
out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}
static int mthca_query_pkey(struct ib_device *ibdev,
			    u8 port, u16 index, u16 *pkey)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
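
/*
 * A port GID is the 8-byte subnet prefix from PortInfo followed by an
 * 8-byte GUID from the GuidInfo table, so assembling one GID takes
 * two MAD queries.
 */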
static int mthca_query_gid(struct ib_device *ibdev, u8 port,
			   int index, union ib_gid *gid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
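
/*
 * A user context owns one UAR page for posting doorbells and, on
 * mem-free HCAs, a table of userspace doorbell records; both are set
 * up here, with the sizes reported back to userspace through uresp.
 */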
static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct mthca_alloc_ucontext_resp uresp;
	struct mthca_ucontext *context;
	int err;

	if (!(to_mdev(ibdev)->active))
		return ERR_PTR(-EAGAIN);

	memset(&uresp, 0, sizeof uresp);

	uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
	if (mthca_is_memfree(to_mdev(ibdev)))
		uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
	else
		uresp.uarc_size = 0;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
	if (IS_ERR(context->db_tab)) {
		err = PTR_ERR(context->db_tab);
		mthca_uar_free(to_mdev(ibdev), &context->uar);
		kfree(context);
		return ERR_PTR(err);
	}

	if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
		mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
		mthca_uar_free(to_mdev(ibdev), &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	context->reg_mr_warned = 0;

	return &context->ibucontext;
}
static int mthca_dealloc_ucontext(struct ib_ucontext *context)
{
	mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
				  to_mucontext(context)->db_tab);
	mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
	kfree(to_mucontext(context));

	return 0;
}
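
/*
 * Map the context's UAR register page into userspace: exactly one
 * page, uncached, so that doorbell writes reach the HCA directly.
 */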
static int mthca_mmap_uar(struct ib_ucontext *context,
			  struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (io_remap_pfn_range(vma, vma->vm_start,
			       to_mucontext(context)->uar.pfn,
			       PAGE_SIZE, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
			  struct ib_udata *udata)
{
	struct ib_device *ibdev = ibpd->device;
	struct mthca_pd *pd = to_mpd(ibpd);
	int err;

	err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
	if (err)
		return err;

	if (context) {
		if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
			mthca_pd_free(to_mdev(ibdev), pd);
			return -EFAULT;
		}
	}

	return 0;
}
static void mthca_dealloc_pd(struct ib_pd *pd)
{
	mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
}
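
/*
 * AHs are allocated with GFP_ATOMIC since address handle creation may
 * be invoked from contexts where sleeping is not allowed.
 */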
static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
				     struct rdma_ah_attr *ah_attr,
				     u32 flags,
				     struct ib_udata *udata)
{
	int err;
	struct mthca_ah *ah;

	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
	if (err) {
		kfree(ah);
		return ERR_PTR(err);
	}

	return &ah->ibah;
}

static int mthca_ah_destroy(struct ib_ah *ah, u32 flags)
{
	mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
	kfree(ah);

	return 0;
}
static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
				       struct ib_srq_init_attr *init_attr,
				       struct ib_udata *udata)
{
	struct mthca_create_srq ucmd;
	struct mthca_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);
	struct mthca_srq *srq;
	int err;

	if (init_attr->srq_type != IB_SRQT_BASIC)
		return ERR_PTR(-EOPNOTSUPP);

	srq = kmalloc(sizeof *srq, GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	if (udata) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_free;
		}

		err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
					context->db_tab, ucmd.db_index,
					ucmd.db_page);
		if (err)
			goto err_free;

		srq->mr.ibmr.lkey = ucmd.lkey;
		srq->db_index     = ucmd.db_index;
	}

	err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
			      &init_attr->attr, srq, udata);

	if (err && udata)
		mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
				    context->db_tab, ucmd.db_index);

	if (err)
		goto err_free;

	if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
		mthca_free_srq(to_mdev(pd->device), srq);
		err = -EFAULT;
		goto err_free;
	}

	return &srq->ibsrq;

err_free:
	kfree(srq);

	return ERR_PTR(err);
}
static int mthca_destroy_srq(struct ib_srq *srq)
{
	struct mthca_ucontext *context;

	if (srq->uobject) {
		context = to_mucontext(srq->uobject->context);

		mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
				    context->db_tab, to_msrq(srq)->db_index);
	}

	mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
	kfree(srq);

	return 0;
}
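
/*
 * For userspace QPs, the send and receive doorbell records live in
 * user memory and must be mapped through the context's doorbell table
 * before the QP is handed to the hardware.
 */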
static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata)
{
	struct mthca_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);
	struct mthca_create_qp ucmd;
	struct mthca_qp *qp;
	int err;

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	{
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		if (udata) {
			if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
				kfree(qp);
				return ERR_PTR(-EFAULT);
			}

			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
						context->db_tab,
						ucmd.sq_db_index, ucmd.sq_db_page);
			if (err) {
				kfree(qp);
				return ERR_PTR(err);
			}

			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
						context->db_tab,
						ucmd.rq_db_index, ucmd.rq_db_page);
			if (err) {
				mthca_unmap_user_db(to_mdev(pd->device),
						    &context->uar, context->db_tab,
						    ucmd.sq_db_index);
				kfree(qp);
				return ERR_PTR(err);
			}

			qp->mr.ibmr.lkey = ucmd.lkey;
			qp->sq.db_index  = ucmd.sq_db_index;
			qp->rq.db_index  = ucmd.rq_db_index;
		}

		err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
				     to_mcq(init_attr->send_cq),
				     to_mcq(init_attr->recv_cq),
				     init_attr->qp_type, init_attr->sq_sig_type,
				     &init_attr->cap, qp, udata);

		if (err && udata) {
			mthca_unmap_user_db(to_mdev(pd->device),
					    &context->uar, context->db_tab,
					    ucmd.sq_db_index);
			mthca_unmap_user_db(to_mdev(pd->device),
					    &context->uar, context->db_tab,
					    ucmd.rq_db_index);
		}

		qp->ibqp.qp_num = qp->qpn;
		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		/* Don't allow userspace to create special QPs */
		if (udata)
			return ERR_PTR(-EINVAL);

		qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
				      to_mcq(init_attr->send_cq),
				      to_mcq(init_attr->recv_cq),
				      init_attr->sq_sig_type, &init_attr->cap,
				      qp->ibqp.qp_num, init_attr->port_num,
				      to_msqp(qp), udata);
		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-ENOSYS);
	}

	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	init_attr->cap.max_send_wr     = qp->sq.max;
	init_attr->cap.max_recv_wr     = qp->rq.max;
	init_attr->cap.max_send_sge    = qp->sq.max_gs;
	init_attr->cap.max_recv_sge    = qp->rq.max_gs;
	init_attr->cap.max_inline_data = qp->max_inline_data;

	return &qp->ibqp;
}
static int mthca_destroy_qp(struct ib_qp *qp)
{
	if (qp->uobject) {
		mthca_unmap_user_db(to_mdev(qp->device),
				    &to_mucontext(qp->uobject->context)->uar,
				    to_mucontext(qp->uobject->context)->db_tab,
				    to_mqp(qp)->sq.db_index);
		mthca_unmap_user_db(to_mdev(qp->device),
				    &to_mucontext(qp->uobject->context)->uar,
				    to_mucontext(qp->uobject->context)->db_tab,
				    to_mqp(qp)->rq.db_index);
	}
	mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
	kfree(qp);
	return 0;
}
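
/*
 * The hardware CQ size must be a power of two, so the requested entry
 * count is rounded up to the next power of two strictly greater than
 * the request (one CQE is reserved to tell a full queue from an empty
 * one).
 */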
static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
				     const struct ib_cq_init_attr *attr,
				     struct ib_ucontext *context,
				     struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct mthca_create_cq ucmd;
	struct mthca_cq *cq;
	int nent;
	int err;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
		return ERR_PTR(-EINVAL);

	if (context) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
			return ERR_PTR(-EFAULT);

		err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
					to_mucontext(context)->db_tab,
					ucmd.set_db_index, ucmd.set_db_page);
		if (err)
			return ERR_PTR(err);

		err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
					to_mucontext(context)->db_tab,
					ucmd.arm_db_index, ucmd.arm_db_page);
		if (err)
			goto err_unmap_set;
	}

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		err = -ENOMEM;
		goto err_unmap_arm;
	}

	if (context) {
		cq->buf.mr.ibmr.lkey = ucmd.lkey;
		cq->set_ci_db_index  = ucmd.set_db_index;
		cq->arm_db_index     = ucmd.arm_db_index;
	}

	for (nent = 1; nent <= entries; nent <<= 1)
		; /* nothing */

	err = mthca_init_cq(to_mdev(ibdev), nent,
			    context ? to_mucontext(context) : NULL,
			    context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
			    cq);
	if (err)
		goto err_free;

	if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
		mthca_free_cq(to_mdev(ibdev), cq);
		err = -EFAULT;
		goto err_free;
	}

	cq->resize_buf = NULL;

	return &cq->ibcq;

err_free:
	kfree(cq);

err_unmap_arm:
	if (context)
		mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
				    to_mucontext(context)->db_tab, ucmd.arm_db_index);

err_unmap_set:
	if (context)
		mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
				    to_mucontext(context)->db_tab, ucmd.set_db_index);

	return ERR_PTR(err);
}
static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
				  int entries)
{
	int ret;

	spin_lock_irq(&cq->lock);
	if (cq->resize_buf) {
		ret = -EBUSY;
		goto unlock;
	}

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf) {
		ret = -ENOMEM;
		goto unlock;
	}

	cq->resize_buf->state = CQ_RESIZE_ALLOC;

	ret = 0;

unlock:
	spin_unlock_irq(&cq->lock);

	if (ret)
		return ret;

	ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (ret) {
		spin_lock_irq(&cq->lock);
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		spin_unlock_irq(&cq->lock);
		return ret;
	}

	cq->resize_buf->cqe = entries - 1;

	spin_lock_irq(&cq->lock);
	cq->resize_buf->state = CQ_RESIZE_READY;
	spin_unlock_irq(&cq->lock);

	return 0;
}
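
/*
 * Resizing a kernel CQ is a three-step dance: allocate a new buffer
 * (mthca_alloc_resize_buf() above), ask the firmware to switch with
 * RESIZE_CQ, then copy over any unpolled CQEs and free whichever
 * buffer is no longer in use.  Userspace CQs instead pass in the lkey
 * of the new buffer they allocated themselves.
 */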
static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_resize_cq ucmd;
	u32 lkey;
	int ret;

	if (entries < 1 || entries > dev->limits.max_cqes)
		return -EINVAL;

	mutex_lock(&cq->mutex);

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		ret = 0;
		goto out;
	}

	if (cq->is_kernel) {
		ret = mthca_alloc_resize_buf(dev, cq, entries);
		if (ret)
			goto out;
		lkey = cq->resize_buf->buf.mr.ibmr.lkey;
	} else {
		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			ret = -EFAULT;
			goto out;
		}
		lkey = ucmd.lkey;
	}

	ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries));

	if (ret) {
		if (cq->resize_buf) {
			mthca_free_cq_buf(dev, &cq->resize_buf->buf,
					  cq->resize_buf->cqe);
			kfree(cq->resize_buf);
			spin_lock_irq(&cq->lock);
			cq->resize_buf = NULL;
			spin_unlock_irq(&cq->lock);
		}
		goto out;
	}

	if (cq->is_kernel) {
		struct mthca_cq_buf tbuf;
		int tcqe;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf->state == CQ_RESIZE_READY) {
			mthca_cq_resize_copy_cqes(cq);
			tbuf         = cq->buf;
			tcqe         = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;
		} else {
			tbuf = cq->resize_buf->buf;
			tcqe = cq->resize_buf->cqe;
		}

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		spin_unlock_irq(&cq->lock);

		mthca_free_cq_buf(dev, &tbuf, tcqe);
	} else
		ibcq->cqe = entries - 1;

out:
	mutex_unlock(&cq->mutex);

	return ret;
}
static int mthca_destroy_cq(struct ib_cq *cq)
{
	if (cq->uobject) {
		mthca_unmap_user_db(to_mdev(cq->device),
				    &to_mucontext(cq->uobject->context)->uar,
				    to_mucontext(cq->uobject->context)->db_tab,
				    to_mcq(cq)->arm_db_index);
		mthca_unmap_user_db(to_mdev(cq->device),
				    &to_mucontext(cq->uobject->context)->uar,
				    to_mucontext(cq->uobject->context)->db_tab,
				    to_mcq(cq)->set_ci_db_index);
	}
	mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
	kfree(cq);

	return 0;
}
static inline u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
	       MTHCA_MPT_FLAG_LOCAL_READ;
}
static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mthca_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mthca_mr_alloc_notrans(to_mdev(pd->device),
				     to_mpd(pd)->pd_num,
				     convert_access(acc), mr);
	if (err) {
		kfree(mr);
		return ERR_PTR(err);
	}

	mr->umem = NULL;

	return &mr->ibmr;
}
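
/*
 * Register a userspace memory region: pin the pages with
 * ib_umem_get(), then write the DMA addresses into the HCA's MTT in
 * write_mtt_size chunks to bound the size of each firmware command.
 */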
static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				       u64 virt, int acc, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(pd->device);
	struct sg_dma_page_iter sg_iter;
	struct mthca_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);
	struct mthca_mr *mr;
	struct mthca_reg_mr ucmd;
	u64 *pages;
	int n, i;
	int err = 0;
	int write_mtt_size;

	if (udata->inlen < sizeof ucmd) {
		if (!context->reg_mr_warned) {
			mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
				   current->comm);
			mthca_warn(dev, "  Update libmthca to fix this.\n");
		}
		++context->reg_mr_warned;

		memset(&ucmd, 0, sizeof ucmd);
	} else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return ERR_PTR(-EFAULT);

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(udata, start, length, acc,
			       ucmd.mr_attrs & MTHCA_MR_DMASYNC);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err;
	}

	n = ib_umem_num_pages(mr->umem);

	mr->mtt = mthca_alloc_mtt(dev, n);
	if (IS_ERR(mr->mtt)) {
		err = PTR_ERR(mr->mtt);
		goto err_umem;
	}

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_mtt;
	}

	i = n = 0;

	write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));

	for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
		pages[i++] = sg_page_iter_dma_address(&sg_iter);

		/*
		 * Be friendly to write_mtt and pass it chunks
		 * of appropriate size.
		 */
		if (i == write_mtt_size) {
			err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
			if (err)
				goto mtt_done;
			n += i;
			i = 0;
		}
	}

	if (i)
		err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
mtt_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_mtt;

	err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, PAGE_SHIFT, virt, length,
			     convert_access(acc), mr);
	if (err)
		goto err_mtt;

	return &mr->ibmr;

err_mtt:
	mthca_free_mtt(dev, mr->mtt);

err_umem:
	ib_umem_release(mr->umem);

err:
	kfree(mr);
	return ERR_PTR(err);
}
static int mthca_dereg_mr(struct ib_mr *mr)
{
	struct mthca_mr *mmr = to_mmr(mr);

	mthca_free_mr(to_mdev(mr->device), mmr);
	ib_umem_release(mmr->umem);
	kfree(mmr);

	return 0;
}
static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
				      struct ib_fmr_attr *fmr_attr)
{
	struct mthca_fmr *fmr;
	int err;

	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);

	memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
	err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
			      convert_access(mr_access_flags), fmr);
	if (err) {
		kfree(fmr);
		return ERR_PTR(err);
	}

	return &fmr->ibmr;
}
static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
	struct mthca_fmr *mfmr = to_mfmr(fmr);
	int err;

	err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
	if (err)
		return err;

	kfree(mfmr);
	return 0;
}
static int mthca_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;
	int err;
	struct mthca_dev *mdev = NULL;

	list_for_each_entry(fmr, fmr_list, list) {
		if (mdev && to_mdev(fmr->device) != mdev)
			return -EINVAL;
		mdev = to_mdev(fmr->device);
	}

	if (!mdev)
		return 0;

	if (mthca_is_memfree(mdev)) {
		list_for_each_entry(fmr, fmr_list, list)
			mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));

		wmb();
	} else
		list_for_each_entry(fmr, fmr_list, list)
			mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

	err = mthca_SYNC_TPT(mdev);
	return err;
}
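
/* sysfs attributes: hardware revision, HCA type and board ID. */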
static ssize_t hw_rev_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct mthca_dev *dev =
		rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);

	return sprintf(buf, "%x\n", dev->rev_id);
}
static DEVICE_ATTR_RO(hw_rev);
static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mthca_dev *dev =
		rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);

	switch (dev->pdev->device) {
	case PCI_DEVICE_ID_MELLANOX_TAVOR:
		return sprintf(buf, "MT23108\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
		return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL:
		return sprintf(buf, "MT25208\n");
	case PCI_DEVICE_ID_MELLANOX_SINAI:
	case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
		return sprintf(buf, "MT25204\n");
	default:
		return sprintf(buf, "unknown\n");
	}
}
static DEVICE_ATTR_RO(hca_type);
static ssize_t board_id_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mthca_dev *dev =
		rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);

	return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}
static DEVICE_ATTR_RO(board_id);
static struct attribute *mthca_dev_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	NULL
};

static const struct attribute_group mthca_attr_group = {
	.attrs = mthca_dev_attributes,
};
static int mthca_init_node_data(struct mthca_dev *dev)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mthca_MAD_IFC(dev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(dev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	if (mthca_is_memfree(dev))
		dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}
static void get_dev_fw_str(struct ib_device *device, char *str)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev);
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
		 (int) (dev->fw_ver >> 32),
		 (int) (dev->fw_ver >> 16) & 0xffff,
		 (int) dev->fw_ver & 0xffff);
}
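
/*
 * Verbs ops tables: mthca_dev_ops is common to all HCAs; the Tavor
 * vs. mem-free (Arbel/Sinai) variants and the optional SRQ and FMR
 * ops are merged in by mthca_register_device() according to the
 * device's capabilities.
 */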
static const struct ib_device_ops mthca_dev_ops = {
	.alloc_pd = mthca_alloc_pd,
	.alloc_ucontext = mthca_alloc_ucontext,
	.attach_mcast = mthca_multicast_attach,
	.create_ah = mthca_ah_create,
	.create_cq = mthca_create_cq,
	.create_qp = mthca_create_qp,
	.dealloc_pd = mthca_dealloc_pd,
	.dealloc_ucontext = mthca_dealloc_ucontext,
	.dereg_mr = mthca_dereg_mr,
	.destroy_ah = mthca_ah_destroy,
	.destroy_cq = mthca_destroy_cq,
	.destroy_qp = mthca_destroy_qp,
	.detach_mcast = mthca_multicast_detach,
	.get_dev_fw_str = get_dev_fw_str,
	.get_dma_mr = mthca_get_dma_mr,
	.get_port_immutable = mthca_port_immutable,
	.mmap = mthca_mmap_uar,
	.modify_device = mthca_modify_device,
	.modify_port = mthca_modify_port,
	.modify_qp = mthca_modify_qp,
	.poll_cq = mthca_poll_cq,
	.process_mad = mthca_process_mad,
	.query_ah = mthca_ah_query,
	.query_device = mthca_query_device,
	.query_gid = mthca_query_gid,
	.query_pkey = mthca_query_pkey,
	.query_port = mthca_query_port,
	.query_qp = mthca_query_qp,
	.reg_user_mr = mthca_reg_user_mr,
	.resize_cq = mthca_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd),
};

static const struct ib_device_ops mthca_dev_arbel_srq_ops = {
	.create_srq = mthca_create_srq,
	.destroy_srq = mthca_destroy_srq,
	.modify_srq = mthca_modify_srq,
	.post_srq_recv = mthca_arbel_post_srq_recv,
	.query_srq = mthca_query_srq,
};

static const struct ib_device_ops mthca_dev_tavor_srq_ops = {
	.create_srq = mthca_create_srq,
	.destroy_srq = mthca_destroy_srq,
	.modify_srq = mthca_modify_srq,
	.post_srq_recv = mthca_tavor_post_srq_recv,
	.query_srq = mthca_query_srq,
};

static const struct ib_device_ops mthca_dev_arbel_fmr_ops = {
	.alloc_fmr = mthca_alloc_fmr,
	.dealloc_fmr = mthca_dealloc_fmr,
	.map_phys_fmr = mthca_arbel_map_phys_fmr,
	.unmap_fmr = mthca_unmap_fmr,
};

static const struct ib_device_ops mthca_dev_tavor_fmr_ops = {
	.alloc_fmr = mthca_alloc_fmr,
	.dealloc_fmr = mthca_dealloc_fmr,
	.map_phys_fmr = mthca_tavor_map_phys_fmr,
	.unmap_fmr = mthca_unmap_fmr,
};

static const struct ib_device_ops mthca_dev_arbel_ops = {
	.post_recv = mthca_arbel_post_receive,
	.post_send = mthca_arbel_post_send,
	.req_notify_cq = mthca_arbel_arm_cq,
};

static const struct ib_device_ops mthca_dev_tavor_ops = {
	.post_recv = mthca_tavor_post_receive,
	.post_send = mthca_tavor_post_send,
	.req_notify_cq = mthca_tavor_arm_cq,
};
int mthca_register_device(struct mthca_dev *dev)
{
	int ret;

	ret = mthca_init_node_data(dev);
	if (ret)
		return ret;

	dev->ib_dev.owner            = THIS_MODULE;

	dev->ib_dev.uverbs_abi_ver   = MTHCA_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask  =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
		(1ull << IB_USER_VERBS_CMD_REG_MR)              |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
	dev->ib_dev.node_type        = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt    = dev->limits.num_ports;
	dev->ib_dev.num_comp_vectors = 1;
	dev->ib_dev.dev.parent       = &dev->pdev->dev;

	if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)  |
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);

		if (mthca_is_memfree(dev))
			ib_set_device_ops(&dev->ib_dev,
					  &mthca_dev_arbel_srq_ops);
		else
			ib_set_device_ops(&dev->ib_dev,
					  &mthca_dev_tavor_srq_ops);
	}

	if (dev->mthca_flags & MTHCA_FLAG_FMR) {
		if (mthca_is_memfree(dev))
			ib_set_device_ops(&dev->ib_dev,
					  &mthca_dev_arbel_fmr_ops);
		else
			ib_set_device_ops(&dev->ib_dev,
					  &mthca_dev_tavor_fmr_ops);
	}

	ib_set_device_ops(&dev->ib_dev, &mthca_dev_ops);

	if (mthca_is_memfree(dev))
		ib_set_device_ops(&dev->ib_dev, &mthca_dev_arbel_ops);
	else
		ib_set_device_ops(&dev->ib_dev, &mthca_dev_tavor_ops);

	mutex_init(&dev->cap_mask_mutex);

	rdma_set_device_sysfs_group(&dev->ib_dev, &mthca_attr_group);
	dev->ib_dev.driver_id = RDMA_DRIVER_MTHCA;
	ret = ib_register_device(&dev->ib_dev, "mthca%d");
	if (ret)
		return ret;

	mthca_start_catas_poll(dev);

	return 0;
}
void mthca_unregister_device(struct mthca_dev *dev)
{
	mthca_stop_catas_poll(dev);
	ib_unregister_device(&dev->ib_dev);
}