/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"
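
/*
 * This file implements the ib_device_ops verb entry points for the
 * soft-RoCE (rxe) driver: device and port queries, PD/AH/SRQ/QP/CQ/MR
 * life-cycle, and the post_send/post_recv fast paths that copy work
 * requests into the driver's circular queues.
 */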

static int rxe_query_device(struct ib_device *dev,
                            struct ib_device_attr *attr,
                            struct ib_udata *uhw)
{
        struct rxe_dev *rxe = to_rdev(dev);

        if (uhw->inlen || uhw->outlen)
                return -EINVAL;

        *attr = rxe->attr;
        return 0;
}

static int rxe_query_port(struct ib_device *dev,
                          u8 port_num, struct ib_port_attr *attr)
{
        struct rxe_dev *rxe = to_rdev(dev);
        struct rxe_port *port;
        int rc;

        port = &rxe->port;

        /* *attr being zeroed by the caller, avoid zeroing it here */
        *attr = port->attr;

        mutex_lock(&rxe->usdev_lock);
        rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
                              &attr->active_width);

        if (attr->state == IB_PORT_ACTIVE)
                attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
        else if (dev_get_flags(rxe->ndev) & IFF_UP)
                attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
        else
                attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;

        mutex_unlock(&rxe->usdev_lock);

        return rc;
}

static int rxe_query_pkey(struct ib_device *device,
                          u8 port_num, u16 index, u16 *pkey)
{
        if (index > 0)
                return -EINVAL;

        *pkey = IB_DEFAULT_PKEY_FULL;
        return 0;
}

static int rxe_modify_device(struct ib_device *dev,
                             int mask, struct ib_device_modify *attr)
{
        struct rxe_dev *rxe = to_rdev(dev);

        if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
                     IB_DEVICE_MODIFY_NODE_DESC))
                return -EOPNOTSUPP;

        if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
                rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

        if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
                memcpy(rxe->ib_dev.node_desc,
                       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
        }

        return 0;
}

static int rxe_modify_port(struct ib_device *dev,
                           u8 port_num, int mask, struct ib_port_modify *attr)
{
        struct rxe_dev *rxe = to_rdev(dev);
        struct rxe_port *port;

        port = &rxe->port;

        port->attr.port_cap_flags |= attr->set_port_cap_mask;
        port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

        if (mask & IB_PORT_RESET_QKEY_CNTR)
                port->attr.qkey_viol_cntr = 0;

        return 0;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
                                               u8 port_num)
{
        return IB_LINK_LAYER_ETHERNET;
}

static int rxe_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
        struct rxe_dev *rxe = to_rdev(uctx->device);
        struct rxe_ucontext *uc = to_ruc(uctx);

        return rxe_add_to_pool(&rxe->uc_pool, &uc->pelem);
}

static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
        struct rxe_ucontext *uc = to_ruc(ibuc);

        rxe_drop_ref(uc);
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
                              struct ib_port_immutable *immutable)
{
        int err;
        struct ib_port_attr attr;

        immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

        err = ib_query_port(dev, port_num, &attr);
        if (err)
                return err;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;
        immutable->max_mad_size = IB_MGMT_MAD_SIZE;

        return 0;
}

static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct rxe_dev *rxe = to_rdev(ibpd->device);
        struct rxe_pd *pd = to_rpd(ibpd);

        return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}

static void rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct rxe_pd *pd = to_rpd(ibpd);

        rxe_drop_ref(pd);
}

static int rxe_create_ah(struct ib_ah *ibah,
                         struct rdma_ah_init_attr *init_attr,
                         struct ib_udata *udata)
{
        int err;
        struct rxe_dev *rxe = to_rdev(ibah->device);
        struct rxe_ah *ah = to_rah(ibah);

        err = rxe_av_chk_attr(rxe, init_attr->ah_attr);
        if (err)
                return err;

        err = rxe_add_to_pool(&rxe->ah_pool, &ah->pelem);
        if (err)
                return err;

        rxe_init_av(init_attr->ah_attr, &ah->av);
        return 0;
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
        int err;
        struct rxe_dev *rxe = to_rdev(ibah->device);
        struct rxe_ah *ah = to_rah(ibah);

        err = rxe_av_chk_attr(rxe, attr);
        if (err)
                return err;

        rxe_init_av(attr, &ah->av);
        return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
        struct rxe_ah *ah = to_rah(ibah);

        memset(attr, 0, sizeof(*attr));
        attr->type = ibah->type;
        rxe_av_to_attr(&ah->av, attr);
        return 0;
}

static void rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
        struct rxe_ah *ah = to_rah(ibah);

        rxe_drop_ref(ah);
}
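
/*
 * Copy one ib_recv_wr into the next free slot of a receive queue ring.
 * Callers (rxe_post_recv() and rxe_post_srq_recv() below) hold the
 * queue's producer lock while calling this.
 */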

static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
        int err;
        int i;
        u32 length;
        struct rxe_recv_wqe *recv_wqe;
        int num_sge = ibwr->num_sge;

        if (unlikely(queue_full(rq->queue))) {
                err = -ENOMEM;
                goto err1;
        }

        if (unlikely(num_sge > rq->max_sge)) {
                err = -EINVAL;
                goto err1;
        }

        length = 0;
        for (i = 0; i < num_sge; i++)
                length += ibwr->sg_list[i].length;

        recv_wqe = producer_addr(rq->queue);
        recv_wqe->wr_id = ibwr->wr_id;
        recv_wqe->num_sge = num_sge;

        memcpy(recv_wqe->dma.sge, ibwr->sg_list,
               num_sge * sizeof(struct ib_sge));

        recv_wqe->dma.length = length;
        recv_wqe->dma.resid = length;
        recv_wqe->dma.num_sge = num_sge;
        recv_wqe->dma.cur_sge = 0;
        recv_wqe->dma.sge_offset = 0;

        /* make sure all changes to the work queue are written before we
         * update the producer pointer
         */
        smp_wmb();

        advance_producer(rq->queue);
        return 0;

err1:
        return err;
}

static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
                          struct ib_udata *udata)
{
        int err;
        struct rxe_dev *rxe = to_rdev(ibsrq->device);
        struct rxe_pd *pd = to_rpd(ibsrq->pd);
        struct rxe_srq *srq = to_rsrq(ibsrq);
        struct rxe_create_srq_resp __user *uresp = NULL;

        if (udata) {
                if (udata->outlen < sizeof(*uresp))
                        return -EINVAL;
                uresp = udata->outbuf;
        }

        err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
        if (err)
                goto err1;

        err = rxe_add_to_pool(&rxe->srq_pool, &srq->pelem);
        if (err)
                goto err1;

        rxe_add_ref(pd);
        srq->pd = pd;

        err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
        if (err)
                goto err2;

        return 0;

err2:
        rxe_drop_ref(pd);
        rxe_drop_ref(srq);
err1:
        return err;
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                          enum ib_srq_attr_mask mask,
                          struct ib_udata *udata)
{
        int err;
        struct rxe_srq *srq = to_rsrq(ibsrq);
        struct rxe_dev *rxe = to_rdev(ibsrq->device);
        struct rxe_modify_srq_cmd ucmd = {};

        if (udata) {
                if (udata->inlen < sizeof(ucmd))
                        return -EINVAL;

                err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
                if (err)
                        return err;
        }

        err = rxe_srq_chk_attr(rxe, srq, attr, mask);
        if (err)
                goto err1;

        err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
        if (err)
                goto err1;

        return 0;

err1:
        return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
        struct rxe_srq *srq = to_rsrq(ibsrq);

        if (srq->error)
                return -EINVAL;

        attr->max_wr = srq->rq.queue->buf->index_mask;
        attr->max_sge = srq->rq.max_sge;
        attr->srq_limit = srq->limit;
        return 0;
}

static void rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
        struct rxe_srq *srq = to_rsrq(ibsrq);

        if (srq->rq.queue)
                rxe_queue_cleanup(srq->rq.queue);

        rxe_drop_ref(srq->pd);
        rxe_drop_ref(srq);
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
                             const struct ib_recv_wr **bad_wr)
{
        int err = 0;
        unsigned long flags;
        struct rxe_srq *srq = to_rsrq(ibsrq);

        spin_lock_irqsave(&srq->rq.producer_lock, flags);

        while (wr) {
                err = post_one_recv(&srq->rq, wr);
                if (unlikely(err))
                        break;
                wr = wr->next;
        }

        spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

        if (err)
                *bad_wr = wr;

        return err;
}
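
/*
 * Illustrative sketch (not part of this driver): a kernel consumer posts a
 * receive buffer to an SRQ through the core ib_post_srq_recv() helper, which
 * dispatches to rxe_post_srq_recv() above via rxe_dev_ops.  "my_srq",
 * "my_buf", "MY_BUF_SIZE" and "my_lkey" are hypothetical names.
 *
 *	struct ib_sge sge = {
 *		.addr   = (uintptr_t)my_buf,
 *		.length = MY_BUF_SIZE,
 *		.lkey   = my_lkey,
 *	};
 *	struct ib_recv_wr wr = { .wr_id = 1, .sg_list = &sge, .num_sge = 1 };
 *	const struct ib_recv_wr *bad_wr;
 *	int ret = ib_post_srq_recv(my_srq, &wr, &bad_wr);
 */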

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
                                   struct ib_qp_init_attr *init,
                                   struct ib_udata *udata)
{
        int err;
        struct rxe_dev *rxe = to_rdev(ibpd->device);
        struct rxe_pd *pd = to_rpd(ibpd);
        struct rxe_qp *qp;
        struct rxe_create_qp_resp __user *uresp = NULL;

        if (udata) {
                if (udata->outlen < sizeof(*uresp))
                        return ERR_PTR(-EINVAL);
                uresp = udata->outbuf;
        }

        err = rxe_qp_chk_init(rxe, init);
        if (err)
                goto err1;

        qp = rxe_alloc(&rxe->qp_pool);
        if (!qp) {
                err = -ENOMEM;
                goto err1;
        }

        rxe_add_index(qp);

        err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
        if (err)
                goto err2;

        return &qp->ibqp;

err2:
        rxe_drop_index(qp);
        rxe_drop_ref(qp);
err1:
        return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                         int mask, struct ib_udata *udata)
{
        int err;
        struct rxe_dev *rxe = to_rdev(ibqp->device);
        struct rxe_qp *qp = to_rqp(ibqp);

        err = rxe_qp_chk_attr(rxe, qp, attr, mask);
        if (err)
                goto err1;

        err = rxe_qp_from_attr(qp, attr, mask, udata);
        if (err)
                goto err1;

        return 0;

err1:
        return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                        int mask, struct ib_qp_init_attr *init)
{
        struct rxe_qp *qp = to_rqp(ibqp);

        rxe_qp_to_init(qp, init);
        rxe_qp_to_attr(qp, attr, mask);

        return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
        struct rxe_qp *qp = to_rqp(ibqp);

        rxe_qp_destroy(qp);
        rxe_drop_index(qp);
        rxe_drop_ref(qp);
        return 0;
}

static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
                            unsigned int mask, unsigned int length)
{
        int num_sge = ibwr->num_sge;
        struct rxe_sq *sq = &qp->sq;

        if (unlikely(num_sge > sq->max_sge))
                goto err1;

        if (unlikely(mask & WR_ATOMIC_MASK)) {
                if (length < 8)
                        goto err1;

                if (atomic_wr(ibwr)->remote_addr & 0x7)
                        goto err1;
        }

        if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
                     (length > sq->max_inline)))
                goto err1;

        return 0;

err1:
        return -EINVAL;
}

static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
                         const struct ib_send_wr *ibwr)
{
        wr->wr_id = ibwr->wr_id;
        wr->num_sge = ibwr->num_sge;
        wr->opcode = ibwr->opcode;
        wr->send_flags = ibwr->send_flags;

        if (qp_type(qp) == IB_QPT_UD ||
            qp_type(qp) == IB_QPT_SMI ||
            qp_type(qp) == IB_QPT_GSI) {
                wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
                wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
                if (qp_type(qp) == IB_QPT_GSI)
                        wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
                if (wr->opcode == IB_WR_SEND_WITH_IMM)
                        wr->ex.imm_data = ibwr->ex.imm_data;
        } else {
                switch (wr->opcode) {
                case IB_WR_RDMA_WRITE_WITH_IMM:
                        wr->ex.imm_data = ibwr->ex.imm_data;
                        fallthrough;
                case IB_WR_RDMA_READ:
                case IB_WR_RDMA_WRITE:
                        wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
                        wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
                        break;
                case IB_WR_SEND_WITH_IMM:
                        wr->ex.imm_data = ibwr->ex.imm_data;
                        break;
                case IB_WR_SEND_WITH_INV:
                        wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
                        break;
                case IB_WR_ATOMIC_CMP_AND_SWP:
                case IB_WR_ATOMIC_FETCH_AND_ADD:
                        wr->wr.atomic.remote_addr =
                                atomic_wr(ibwr)->remote_addr;
                        wr->wr.atomic.compare_add =
                                atomic_wr(ibwr)->compare_add;
                        wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
                        wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
                        break;
                case IB_WR_LOCAL_INV:
                        wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
                        break;
                case IB_WR_REG_MR:
                        wr->wr.reg.mr = reg_wr(ibwr)->mr;
                        wr->wr.reg.key = reg_wr(ibwr)->key;
                        wr->wr.reg.access = reg_wr(ibwr)->access;
                        break;
                default:
                        break;
                }
        }
}
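
/*
 * Build a complete send WQE in the send queue ring: copy the WR header
 * via init_send_wr(), the AV for UD/SMI/GSI QPs, and either the inline
 * payload or the SGE list, then stamp the DMA state and SSN.
 */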

static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
                         unsigned int mask, unsigned int length,
                         struct rxe_send_wqe *wqe)
{
        int num_sge = ibwr->num_sge;
        struct ib_sge *sge;
        int i;
        u8 *p;

        init_send_wr(qp, &wqe->wr, ibwr);

        if (qp_type(qp) == IB_QPT_UD ||
            qp_type(qp) == IB_QPT_SMI ||
            qp_type(qp) == IB_QPT_GSI)
                memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

        if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
                p = wqe->dma.inline_data;

                sge = ibwr->sg_list;
                for (i = 0; i < num_sge; i++, sge++) {
                        memcpy(p, (void *)(uintptr_t)sge->addr,
                               sge->length);

                        p += sge->length;
                }
        } else if (mask & WR_REG_MASK) {
                wqe->mask = mask;
                wqe->state = wqe_state_posted;
                return 0;
        } else
                memcpy(wqe->dma.sge, ibwr->sg_list,
                       num_sge * sizeof(struct ib_sge));

        wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
                mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
        wqe->mask = mask;
        wqe->dma.length = length;
        wqe->dma.resid = length;
        wqe->dma.num_sge = num_sge;
        wqe->dma.cur_sge = 0;
        wqe->dma.sge_offset = 0;
        wqe->state = wqe_state_posted;
        wqe->ssn = atomic_add_return(1, &qp->ssn);

        return 0;
}
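
/*
 * Validate and enqueue one send WR under the SQ producer lock.  The
 * smp_wmb() below is intended to make the fully written WQE visible
 * before the producer index is advanced for the requester task.
 */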

static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
                         unsigned int mask, u32 length)
{
        int err;
        struct rxe_sq *sq = &qp->sq;
        struct rxe_send_wqe *send_wqe;
        unsigned long flags;

        err = validate_send_wr(qp, ibwr, mask, length);
        if (err)
                return err;

        spin_lock_irqsave(&qp->sq.sq_lock, flags);

        if (unlikely(queue_full(sq->queue))) {
                err = -ENOMEM;
                goto err1;
        }

        send_wqe = producer_addr(sq->queue);

        err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
        if (unlikely(err))
                goto err1;

        /*
         * make sure all changes to the work queue are
         * written before we update the producer pointer
         */
        smp_wmb();

        advance_producer(sq->queue);
        spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

        return 0;

err1:
        spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
        return err;
}
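
/*
 * Walk a chain of send WRs, posting each one; the first failing WR is
 * reported through *bad_wr.  The requester task is then kicked to start
 * protocol processing, and the completer task as well if the QP has
 * already moved to the error state.
 */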

static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
                                const struct ib_send_wr **bad_wr)
{
        int err = 0;
        unsigned int mask;
        unsigned int length = 0;
        int i;
        struct ib_send_wr *next;

        while (wr) {
                mask = wr_opcode_mask(wr->opcode, qp);
                if (unlikely(!mask)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
                             !(mask & WR_INLINE_MASK))) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                next = wr->next;

                length = 0;
                for (i = 0; i < wr->num_sge; i++)
                        length += wr->sg_list[i].length;

                err = post_one_send(qp, wr, mask, length);
                if (err) {
                        *bad_wr = wr;
                        break;
                }
                wr = next;
        }

        rxe_run_task(&qp->req.task, 1);
        if (unlikely(qp->req.state == QP_STATE_ERROR))
                rxe_run_task(&qp->comp.task, 1);

        return err;
}

static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                         const struct ib_send_wr **bad_wr)
{
        struct rxe_qp *qp = to_rqp(ibqp);

        if (unlikely(!qp->valid)) {
                *bad_wr = wr;
                return -EINVAL;
        }

        if (unlikely(qp->req.state < QP_STATE_READY)) {
                *bad_wr = wr;
                return -EINVAL;
        }

        if (qp->is_user) {
                /* Utilize process context to do protocol processing */
                rxe_run_task(&qp->req.task, 0);
                return 0;
        } else
                return rxe_post_send_kernel(qp, wr, bad_wr);
}
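
/*
 * Illustrative sketch (not part of this driver): a kernel ULP posts a send
 * WR with the core ib_post_send() helper, which reaches rxe_post_send()
 * above via rxe_dev_ops.  "my_qp", "my_buf", "MY_BUF_SIZE" and "my_lkey"
 * are hypothetical names.
 *
 *	struct ib_sge sge = {
 *		.addr   = (uintptr_t)my_buf,
 *		.length = MY_BUF_SIZE,
 *		.lkey   = my_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = 1,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(my_qp, &wr, &bad_wr);
 */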

static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
                         const struct ib_recv_wr **bad_wr)
{
        int err = 0;
        struct rxe_qp *qp = to_rqp(ibqp);
        struct rxe_rq *rq = &qp->rq;
        unsigned long flags;

        if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
                *bad_wr = wr;
                err = -EINVAL;
                goto err1;
        }

        if (unlikely(qp->srq)) {
                *bad_wr = wr;
                err = -EINVAL;
                goto err1;
        }

        spin_lock_irqsave(&rq->producer_lock, flags);

        while (wr) {
                err = post_one_recv(rq, wr);
                if (unlikely(err)) {
                        *bad_wr = wr;
                        break;
                }
                wr = wr->next;
        }

        spin_unlock_irqrestore(&rq->producer_lock, flags);

        if (qp->resp.state == QP_STATE_ERROR)
                rxe_run_task(&qp->resp.task, 1);

err1:
        return err;
}

static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                         struct ib_udata *udata)
{
        int err;
        struct ib_device *dev = ibcq->device;
        struct rxe_dev *rxe = to_rdev(dev);
        struct rxe_cq *cq = to_rcq(ibcq);
        struct rxe_create_cq_resp __user *uresp = NULL;

        if (udata) {
                if (udata->outlen < sizeof(*uresp))
                        return -EINVAL;
                uresp = udata->outbuf;
        }

        if (attr->flags)
                return -EINVAL;

        err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
        if (err)
                return err;

        err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
                               uresp);
        if (err)
                return err;

        return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem);
}

static void rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
        struct rxe_cq *cq = to_rcq(ibcq);

        rxe_cq_disable(cq);

        rxe_drop_ref(cq);
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
        int err;
        struct rxe_cq *cq = to_rcq(ibcq);
        struct rxe_dev *rxe = to_rdev(ibcq->device);
        struct rxe_resize_cq_resp __user *uresp = NULL;

        if (udata) {
                if (udata->outlen < sizeof(*uresp))
                        return -EINVAL;
                uresp = udata->outbuf;
        }

        err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
        if (err)
                goto err1;

        err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
        if (err)
                goto err1;

        return 0;

err1:
        return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        int i;
        struct rxe_cq *cq = to_rcq(ibcq);
        struct rxe_cqe *cqe;
        unsigned long flags;

        spin_lock_irqsave(&cq->cq_lock, flags);
        for (i = 0; i < num_entries; i++) {
                cqe = queue_head(cq->queue);
                if (!cqe)
                        break;

                memcpy(wc++, &cqe->ibwc, sizeof(*wc));
                advance_consumer(cq->queue);
        }
        spin_unlock_irqrestore(&cq->cq_lock, flags);

        return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
        struct rxe_cq *cq = to_rcq(ibcq);
        int count = queue_count(cq->queue);

        return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct rxe_cq *cq = to_rcq(ibcq);
        unsigned long irq_flags;
        int ret = 0;

        spin_lock_irqsave(&cq->cq_lock, irq_flags);
        if (cq->notify != IB_CQ_NEXT_COMP)
                cq->notify = flags & IB_CQ_SOLICITED_MASK;

        if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
                ret = 1;

        spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

        return ret;
}
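
/*
 * Memory region verbs: DMA (rxe_get_dma_mr), user (rxe_reg_user_mr) and
 * fast-registration (rxe_alloc_mr) MRs are all backed by struct rxe_mem
 * objects allocated from the device's mr_pool.
 */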

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
        struct rxe_dev *rxe = to_rdev(ibpd->device);
        struct rxe_pd *pd = to_rpd(ibpd);
        struct rxe_mem *mr;

        mr = rxe_alloc(&rxe->mr_pool);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        rxe_add_index(mr);
        rxe_add_ref(pd);
        rxe_mem_init_dma(pd, access, mr);

        return &mr->ibmr;
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
                                     u64 start,
                                     u64 length,
                                     u64 iova,
                                     int access, struct ib_udata *udata)
{
        int err;
        struct rxe_dev *rxe = to_rdev(ibpd->device);
        struct rxe_pd *pd = to_rpd(ibpd);
        struct rxe_mem *mr;

        mr = rxe_alloc(&rxe->mr_pool);
        if (!mr) {
                err = -ENOMEM;
                goto err2;
        }

        rxe_add_index(mr);

        rxe_add_ref(pd);

        err = rxe_mem_init_user(pd, start, length, iova,
                                access, udata, mr);
        if (err)
                goto err3;

        return &mr->ibmr;

err3:
        rxe_drop_ref(pd);
        rxe_drop_index(mr);
        rxe_drop_ref(mr);
err2:
        return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
        struct rxe_mem *mr = to_rmr(ibmr);

        mr->state = RXE_MEM_STATE_ZOMBIE;
        rxe_drop_ref(mr->pd);
        rxe_drop_index(mr);
        rxe_drop_ref(mr);
        return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
                                  u32 max_num_sg)
{
        struct rxe_dev *rxe = to_rdev(ibpd->device);
        struct rxe_pd *pd = to_rpd(ibpd);
        struct rxe_mem *mr;
        int err;

        if (mr_type != IB_MR_TYPE_MEM_REG)
                return ERR_PTR(-EINVAL);

        mr = rxe_alloc(&rxe->mr_pool);
        if (!mr) {
                err = -ENOMEM;
                goto err1;
        }

        rxe_add_index(mr);

        rxe_add_ref(pd);

        err = rxe_mem_init_fast(pd, max_num_sg, mr);
        if (err)
                goto err2;

        return &mr->ibmr;

err2:
        rxe_drop_ref(pd);
        rxe_drop_index(mr);
        rxe_drop_ref(mr);
err1:
        return ERR_PTR(err);
}

static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct rxe_mem *mr = to_rmr(ibmr);
        struct rxe_map *map;
        struct rxe_phys_buf *buf;

        if (unlikely(mr->nbuf == mr->num_buf))
                return -ENOMEM;

        map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
        buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

        buf->addr = addr;
        buf->size = ibmr->page_size;
        mr->nbuf++;

        return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
                         int sg_nents, unsigned int *sg_offset)
{
        struct rxe_mem *mr = to_rmr(ibmr);
        int n;

        mr->nbuf = 0;

        n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

        mr->va = ibmr->iova;
        mr->iova = ibmr->iova;
        mr->length = ibmr->length;
        mr->page_shift = ilog2(ibmr->page_size);
        mr->page_mask = ibmr->page_size - 1;
        mr->offset = mr->iova & mr->page_mask;

        return n;
}
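
/*
 * Multicast attach/detach: group membership is tracked per device in
 * rxe_mc_grp objects.  rxe_mcast_get_grp() takes a reference on the group
 * that is dropped again once the QP has been added to it.
 */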

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
        int err;
        struct rxe_dev *rxe = to_rdev(ibqp->device);
        struct rxe_qp *qp = to_rqp(ibqp);
        struct rxe_mc_grp *grp;

        /* takes a ref on grp if successful */
        err = rxe_mcast_get_grp(rxe, mgid, &grp);
        if (err)
                return err;

        err = rxe_mcast_add_grp_elem(rxe, qp, grp);

        rxe_drop_ref(grp);
        return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
        struct rxe_dev *rxe = to_rdev(ibqp->device);
        struct rxe_qp *qp = to_rqp(ibqp);

        return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
                           struct device_attribute *attr, char *buf)
{
        struct rxe_dev *rxe =
                rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);

        return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct attribute *rxe_dev_attributes[] = {
        &dev_attr_parent.attr,
        NULL
};

static const struct attribute_group rxe_attr_group = {
        .attrs = rxe_dev_attributes,
};

static int rxe_enable_driver(struct ib_device *ib_dev)
{
        struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

        rxe_set_port_state(rxe);
        dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
        return 0;
}
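
/*
 * Verb entry points exported to the RDMA core.  The INIT_RDMA_OBJ_SIZE()
 * entries let the core allocate the driver-private AH/CQ/PD/SRQ/ucontext
 * structures around the ib_* objects handed to the functions above.
 */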

static const struct ib_device_ops rxe_dev_ops = {
        .owner = THIS_MODULE,
        .driver_id = RDMA_DRIVER_RXE,
        .uverbs_abi_ver = RXE_UVERBS_ABI_VERSION,

        .alloc_hw_stats = rxe_ib_alloc_hw_stats,
        .alloc_mr = rxe_alloc_mr,
        .alloc_pd = rxe_alloc_pd,
        .alloc_ucontext = rxe_alloc_ucontext,
        .attach_mcast = rxe_attach_mcast,
        .create_ah = rxe_create_ah,
        .create_cq = rxe_create_cq,
        .create_qp = rxe_create_qp,
        .create_srq = rxe_create_srq,
        .dealloc_driver = rxe_dealloc,
        .dealloc_pd = rxe_dealloc_pd,
        .dealloc_ucontext = rxe_dealloc_ucontext,
        .dereg_mr = rxe_dereg_mr,
        .destroy_ah = rxe_destroy_ah,
        .destroy_cq = rxe_destroy_cq,
        .destroy_qp = rxe_destroy_qp,
        .destroy_srq = rxe_destroy_srq,
        .detach_mcast = rxe_detach_mcast,
        .enable_driver = rxe_enable_driver,
        .get_dma_mr = rxe_get_dma_mr,
        .get_hw_stats = rxe_ib_get_hw_stats,
        .get_link_layer = rxe_get_link_layer,
        .get_port_immutable = rxe_port_immutable,
        .map_mr_sg = rxe_map_mr_sg,
        .mmap = rxe_mmap,
        .modify_ah = rxe_modify_ah,
        .modify_device = rxe_modify_device,
        .modify_port = rxe_modify_port,
        .modify_qp = rxe_modify_qp,
        .modify_srq = rxe_modify_srq,
        .peek_cq = rxe_peek_cq,
        .poll_cq = rxe_poll_cq,
        .post_recv = rxe_post_recv,
        .post_send = rxe_post_send,
        .post_srq_recv = rxe_post_srq_recv,
        .query_ah = rxe_query_ah,
        .query_device = rxe_query_device,
        .query_pkey = rxe_query_pkey,
        .query_port = rxe_query_port,
        .query_qp = rxe_query_qp,
        .query_srq = rxe_query_srq,
        .reg_user_mr = rxe_reg_user_mr,
        .req_notify_cq = rxe_req_notify_cq,
        .resize_cq = rxe_resize_cq,

        INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
        INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
        INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
        INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
};
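
/*
 * Final device setup: fill in the ib_device fields, advertise the
 * supported uverbs commands, attach the netdev, allocate the crc32
 * shash used for ICRC computation and register with the RDMA core.
 */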

int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
{
        int err;
        struct ib_device *dev = &rxe->ib_dev;
        struct crypto_shash *tfm;

        strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

        dev->node_type = RDMA_NODE_IB_CA;
        dev->phys_port_cnt = 1;
        dev->num_comp_vectors = num_possible_cpus();
        dev->dev.parent = rxe_dma_device(rxe);
        dev->local_dma_lkey = 0;
        addrconf_addr_eui48((unsigned char *)&dev->node_guid,
                            rxe->ndev->dev_addr);
        dev->dev.dma_ops = &dma_virt_ops;
        dev->dev.dma_parms = &rxe->dma_parms;
        rxe->dma_parms = (struct device_dma_parameters)
                { .max_segment_size = SZ_2G };
        dma_coerce_mask_and_coherent(&dev->dev,
                                     dma_get_required_mask(&dev->dev));

        dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
            | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
            | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
            | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
            | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
            | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
            | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
            | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
            | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
            | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
            | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
            | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
            | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
            | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
            | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
            | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
            | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
            | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
            | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
            | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
            | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
            | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
            | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
            | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
            | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
            | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
            | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
            | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
            | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
            | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
            | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST);

        ib_set_device_ops(dev, &rxe_dev_ops);
        err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
        if (err)
                return err;

        tfm = crypto_alloc_shash("crc32", 0, 0);
        if (IS_ERR(tfm)) {
                pr_err("failed to allocate crc algorithm err:%ld\n",
                       PTR_ERR(tfm));
                return PTR_ERR(tfm);
        }
        rxe->tfm = tfm;

        rdma_set_device_sysfs_group(dev, &rxe_attr_group);
        err = ib_register_device(dev, ibdev_name);
        if (err)
                pr_warn("%s failed with error %d\n", __func__, err);

        /*
         * Note that rxe may be invalid at this point if another thread
         * unregistered it.
         */
        return err;
}