qedr: Add user space support for SRQ
authorYuval Bason <yuval.bason@cavium.com>
Thu, 9 Aug 2018 14:29:38 +0000 (17:29 +0300)
committerJason Gunthorpe <jgg@mellanox.com>
Tue, 14 Aug 2018 22:31:54 +0000 (16:31 -0600)
This patch adds support for SRQs created in user space and updates
qedr_affiliated_event() to dispatch SRQ asynchronous events for both
RoCE and iWARP.
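For context, the verbs enabled here (CREATE_SRQ, POST_SRQ_RECV, etc.) are the
ones a user-space application reaches through libibverbs. The sketch below is
illustrative only and is not part of this patch; it assumes a pd, a registered
buffer and its lkey were set up beforehand, and uses only standard libibverbs
calls (ibv_create_srq, ibv_post_srq_recv).

	#include <stdint.h>
	#include <infiniband/verbs.h>

	static struct ibv_srq *example_create_srq(struct ibv_pd *pd)
	{
		struct ibv_srq_init_attr attr = {
			.attr = {
				.max_wr  = 128,	/* capped by device max_srq_wr */
				.max_sge = 1,
			},
		};

		/* The SRQ limit event is armed later via
		 * ibv_modify_srq(srq, &a, IBV_SRQ_LIMIT).
		 */
		return ibv_create_srq(pd, &attr);
	}

	static int example_post_srq_recv(struct ibv_srq *srq, void *buf,
					 uint32_t len, uint32_t lkey)
	{
		struct ibv_sge sge = {
			.addr   = (uintptr_t)buf,
			.length = len,
			.lkey   = lkey,
		};
		struct ibv_recv_wr wr = {
			.wr_id = 1, .sg_list = &sge, .num_sge = 1,
		};
		struct ibv_recv_wr *bad_wr;

		return ibv_post_srq_recv(srq, &wr, &bad_wr);
	}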

Signed-off-by: Michal Kalderon <michal.kalderon@cavium.com>
Signed-off-by: Yuval Bason <yuval.bason@cavium.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/hw/qedr/main.c
drivers/infiniband/hw/qedr/verbs.c
include/uapi/rdma/qedr-abi.h

index 2642caf94b55158e0ddc176780bb2f3d9c0fdaaa..a0af6d424aeda58b54c58146f9761e782713e89b 100644 (file)
@@ -191,6 +191,11 @@ static int qedr_register_device(struct qedr_dev *dev)
                                     QEDR_UVERBS(MODIFY_QP) |
                                     QEDR_UVERBS(QUERY_QP) |
                                     QEDR_UVERBS(DESTROY_QP) |
+                                    QEDR_UVERBS(CREATE_SRQ) |
+                                    QEDR_UVERBS(DESTROY_SRQ) |
+                                    QEDR_UVERBS(QUERY_SRQ) |
+                                    QEDR_UVERBS(MODIFY_SRQ) |
+                                    QEDR_UVERBS(POST_SRQ_RECV) |
                                     QEDR_UVERBS(REG_MR) |
                                     QEDR_UVERBS(DEREG_MR) |
                                     QEDR_UVERBS(POLL_CQ) |
@@ -658,42 +663,70 @@ static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
 #define EVENT_TYPE_NOT_DEFINED 0
 #define EVENT_TYPE_CQ          1
 #define EVENT_TYPE_QP          2
+#define EVENT_TYPE_SRQ         3
        struct qedr_dev *dev = (struct qedr_dev *)context;
        struct regpair *async_handle = (struct regpair *)fw_handle;
        u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
        u8 event_type = EVENT_TYPE_NOT_DEFINED;
        struct ib_event event;
+       struct ib_srq *ibsrq;
+       struct qedr_srq *srq;
+       unsigned long flags;
        struct ib_cq *ibcq;
        struct ib_qp *ibqp;
        struct qedr_cq *cq;
        struct qedr_qp *qp;
+       u16 srq_id;
 
-       switch (e_code) {
-       case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
-               event.event = IB_EVENT_CQ_ERR;
-               event_type = EVENT_TYPE_CQ;
-               break;
-       case ROCE_ASYNC_EVENT_SQ_DRAINED:
-               event.event = IB_EVENT_SQ_DRAINED;
-               event_type = EVENT_TYPE_QP;
-               break;
-       case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
-               event.event = IB_EVENT_QP_FATAL;
-               event_type = EVENT_TYPE_QP;
-               break;
-       case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
-               event.event = IB_EVENT_QP_REQ_ERR;
-               event_type = EVENT_TYPE_QP;
-               break;
-       case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
-               event.event = IB_EVENT_QP_ACCESS_ERR;
-               event_type = EVENT_TYPE_QP;
-               break;
-       default:
+       if (IS_ROCE(dev)) {
+               switch (e_code) {
+               case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
+                       event.event = IB_EVENT_CQ_ERR;
+                       event_type = EVENT_TYPE_CQ;
+                       break;
+               case ROCE_ASYNC_EVENT_SQ_DRAINED:
+                       event.event = IB_EVENT_SQ_DRAINED;
+                       event_type = EVENT_TYPE_QP;
+                       break;
+               case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
+                       event.event = IB_EVENT_QP_FATAL;
+                       event_type = EVENT_TYPE_QP;
+                       break;
+               case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
+                       event.event = IB_EVENT_QP_REQ_ERR;
+                       event_type = EVENT_TYPE_QP;
+                       break;
+               case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
+                       event.event = IB_EVENT_QP_ACCESS_ERR;
+                       event_type = EVENT_TYPE_QP;
+                       break;
+               case ROCE_ASYNC_EVENT_SRQ_LIMIT:
+                       event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+                       event_type = EVENT_TYPE_SRQ;
+                       break;
+               case ROCE_ASYNC_EVENT_SRQ_EMPTY:
+                       event.event = IB_EVENT_SRQ_ERR;
+                       event_type = EVENT_TYPE_SRQ;
+                       break;
+               default:
+                       DP_ERR(dev, "unsupported event %d on handle=%llx\n",
+                              e_code, roce_handle64);
+               }
+       } else {
+               switch (e_code) {
+               case QED_IWARP_EVENT_SRQ_LIMIT:
+                       event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+                       event_type = EVENT_TYPE_SRQ;
+                       break;
+               case QED_IWARP_EVENT_SRQ_EMPTY:
+                       event.event = IB_EVENT_SRQ_ERR;
+                       event_type = EVENT_TYPE_SRQ;
+                       break;
+               default:
                DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code,
                       roce_handle64);
+               }
        }
-
        switch (event_type) {
        case EVENT_TYPE_CQ:
                cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
@@ -727,6 +760,25 @@ static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
                }
                DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
                break;
+       case EVENT_TYPE_SRQ:
+               srq_id = (u16)roce_handle64;
+               spin_lock_irqsave(&dev->srqidr.idr_lock, flags);
+               srq = idr_find(&dev->srqidr.idr, srq_id);
+               if (srq) {
+                       ibsrq = &srq->ibsrq;
+                       if (ibsrq->event_handler) {
+                               event.device = ibsrq->device;
+                               event.element.srq = ibsrq;
+                               ibsrq->event_handler(&event,
+                                                    ibsrq->srq_context);
+                       }
+               } else {
+                       DP_NOTICE(dev,
+                                 "SRQ event with NULL pointer ibsrq. Handle=%llx\n",
+                                 roce_handle64);
+               }
+               spin_unlock_irqrestore(&dev->srqidr.idr_lock, flags);
+               DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
        default:
                break;
        }
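The SRQ events dispatched above reach user space through the async event file
of the device context. A minimal consumer sketch (not part of this patch),
using only standard libibverbs calls:

	#include <stdio.h>
	#include <infiniband/verbs.h>

	static void example_drain_async_events(struct ibv_context *ctx)
	{
		struct ibv_async_event ev;

		/* ibv_get_async_event() blocks until an event arrives */
		while (!ibv_get_async_event(ctx, &ev)) {
			switch (ev.event_type) {
			case IBV_EVENT_SRQ_LIMIT_REACHED:
				/* refill ev.element.srq with ibv_post_srq_recv() */
				break;
			case IBV_EVENT_SRQ_ERR:
				fprintf(stderr, "SRQ async error\n");
				break;
			default:
				break;
			}
			ibv_ack_async_event(&ev);
		}
	}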
index 7c75fc36e5ec406ca2724a35ca63c4ead5c2a8c9..3f46fc14ee383f4d0f876183738a43c232fb374f 100644 (file)
@@ -1199,6 +1199,21 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
        return 0;
 }
 
+static int qedr_copy_srq_uresp(struct qedr_dev *dev,
+                              struct qedr_srq *srq, struct ib_udata *udata)
+{
+       struct qedr_create_srq_uresp uresp = {};
+       int rc;
+
+       uresp.srq_id = srq->srq_id;
+
+       rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+       if (rc)
+               DP_ERR(dev, "create srq: problem copying data to user space\n");
+
+       return rc;
+}
+
 static void qedr_copy_rq_uresp(struct qedr_dev *dev,
                               struct qedr_create_qp_uresp *uresp,
                               struct qedr_qp *qp)
@@ -1321,6 +1336,13 @@ static int qedr_check_srq_params(struct ib_pd *ibpd, struct qedr_dev *dev,
        return 0;
 }
 
+static void qedr_free_srq_user_params(struct qedr_srq *srq)
+{
+       qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
+       ib_umem_release(srq->usrq.umem);
+       ib_umem_release(srq->prod_umem);
+}
+
 static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
 {
        struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
@@ -1333,6 +1355,37 @@ static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
                          hw_srq->phy_prod_pair_addr);
 }
 
+static int qedr_init_srq_user_params(struct ib_ucontext *ib_ctx,
+                                    struct qedr_srq *srq,
+                                    struct qedr_create_srq_ureq *ureq,
+                                    int access, int dmasync)
+{
+       struct scatterlist *sg;
+       int rc;
+
+       rc = qedr_init_user_queue(ib_ctx, srq->dev, &srq->usrq, ureq->srq_addr,
+                                 ureq->srq_len, access, dmasync, 1);
+       if (rc)
+               return rc;
+
+       srq->prod_umem = ib_umem_get(ib_ctx, ureq->prod_pair_addr,
+                                    sizeof(struct rdma_srq_producers),
+                                    access, dmasync);
+       if (IS_ERR(srq->prod_umem)) {
+               qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
+               ib_umem_release(srq->usrq.umem);
+               DP_ERR(srq->dev,
+                      "create srq: failed ib_umem_get for producer, got %ld\n",
+                      PTR_ERR(srq->prod_umem));
+               return PTR_ERR(srq->prod_umem);
+       }
+
+       sg = srq->prod_umem->sg_head.sgl;
+       srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
+
+       return 0;
+}
+
 static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
                                        struct qedr_dev *dev,
                                        struct ib_srq_init_attr *init_attr)
@@ -1390,10 +1443,12 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
        struct qedr_dev *dev = get_qedr_dev(ibpd->device);
        struct qed_rdma_create_srq_out_params out_params;
        struct qedr_pd *pd = get_qedr_pd(ibpd);
+       struct qedr_create_srq_ureq ureq = {};
        u64 pbl_base_addr, phy_prod_pair_addr;
+       struct ib_ucontext *ib_ctx = NULL;
        struct qedr_srq_hwq_info *hw_srq;
+       struct qedr_ucontext *ctx = NULL;
        u32 page_cnt, page_size;
-       struct qed_chain *pbl;
        struct qedr_srq *srq;
        int rc = 0;
 
@@ -1416,15 +1471,38 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
        hw_srq->max_wr = init_attr->attr.max_wr;
        hw_srq->max_sges = init_attr->attr.max_sge;
 
-       rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
-       if (rc)
-               goto err0;
+       if (udata && ibpd->uobject && ibpd->uobject->context) {
+               ib_ctx = ibpd->uobject->context;
+               ctx = get_qedr_ucontext(ib_ctx);
+
+               if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+                       DP_ERR(dev,
+                              "create srq: problem copying data from user space\n");
+                       goto err0;
+               }
+
+               rc = qedr_init_srq_user_params(ib_ctx, srq, &ureq, 0, 0);
+               if (rc)
+                       goto err0;
+
+               page_cnt = srq->usrq.pbl_info.num_pbes;
+               pbl_base_addr = srq->usrq.pbl_tbl->pa;
+               phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
+               page_size = BIT(srq->usrq.umem->page_shift);
+       } else {
+               struct qed_chain *pbl;
+
+               rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
+               if (rc)
+                       goto err0;
+
+               pbl = &hw_srq->pbl;
+               page_cnt = qed_chain_get_page_cnt(pbl);
+               pbl_base_addr = qed_chain_get_pbl_phys(pbl);
+               phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
+               page_size = QED_CHAIN_PAGE_SIZE;
+       }
 
-       pbl = &hw_srq->pbl;
-       page_cnt = qed_chain_get_page_cnt(pbl);
-       pbl_base_addr = qed_chain_get_pbl_phys(pbl);
-       phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
-       page_size = QED_CHAIN_PAGE_SIZE;
        in_params.pd_id = pd->pd_id;
        in_params.pbl_base_addr = pbl_base_addr;
        in_params.prod_pair_addr = phy_prod_pair_addr;
@@ -1437,6 +1515,12 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
 
        srq->srq_id = out_params.srq_id;
 
+       if (udata) {
+               rc = qedr_copy_srq_uresp(dev, srq, udata);
+               if (rc)
+                       goto err2;
+       }
+
        rc = qedr_idr_add(dev, &dev->srqidr, srq, srq->srq_id);
        if (rc)
                goto err2;
@@ -1450,7 +1534,10 @@ err2:
 
        dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
 err1:
-       qedr_free_srq_kernel_params(srq);
+       if (udata)
+               qedr_free_srq_user_params(srq);
+       else
+               qedr_free_srq_kernel_params(srq);
 err0:
        kfree(srq);
 
@@ -1467,7 +1554,10 @@ int qedr_destroy_srq(struct ib_srq *ibsrq)
        in_params.srq_id = srq->srq_id;
        dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
 
-       qedr_free_srq_kernel_params(srq);
+       if (ibsrq->pd->uobject)
+               qedr_free_srq_user_params(srq);
+       else
+               qedr_free_srq_kernel_params(srq);
 
        DP_DEBUG(dev, QEDR_MSG_SRQ,
                 "destroy srq: destroyed srq with srq_id=0x%0x\n",
@@ -1593,9 +1683,10 @@ qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
 
        qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
                           &qp->usq.pbl_info, FW_PAGE_SHIFT);
-
-       qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
-       qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
+       if (!qp->srq) {
+               qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
+               qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
+       }
 
        qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
                           &qp->urq.pbl_info, FW_PAGE_SHIFT);
@@ -1641,11 +1732,13 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
        if (rc)
                return rc;
 
-       /* RQ - read access only (0), dma sync not required (0) */
-       rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
-                                 ureq.rq_len, 0, 0, alloc_and_init);
-       if (rc)
-               return rc;
+       if (!qp->srq) {
+               /* RQ - read access only (0), dma sync not required (0) */
+               rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
+                                         ureq.rq_len, 0, 0, alloc_and_init);
+               if (rc)
+                       return rc;
+       }
 
        memset(&in_params, 0, sizeof(in_params));
        qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
@@ -1653,8 +1746,10 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
        in_params.qp_handle_hi = ureq.qp_handle_hi;
        in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
        in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
-       in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
-       in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
+       if (!qp->srq) {
+               in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
+               in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
+       }
 
        qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
                                              &in_params, &out_params);
index 24c658b3c7904297c1098937d451944642999460..7a10b3a325fa386e806b80a8cbd0eabf93632624 100644 (file)
@@ -111,4 +111,21 @@ struct qedr_create_qp_uresp {
        __u32 reserved;
 };
 
+struct qedr_create_srq_ureq {
+       /* user space virtual address of producer pair */
+       __aligned_u64 prod_pair_addr;
+
+       /* user space virtual address of SRQ buffer */
+       __aligned_u64 srq_addr;
+
+       /* length of SRQ buffer */
+       __aligned_u64 srq_len;
+};
+
+struct qedr_create_srq_uresp {
+       __u16 srq_id;
+       __u16 reserved0;
+       __u32 reserved1;
+};
+
 #endif /* __QEDR_USER_H__ */
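For reference, the new ABI structures above are filled by the user-space
provider library and passed as the driver-private portion of the create_srq
command. The sketch below only shows how the fields are expected to map; the
helper name and the srq_buf/prod_pair buffers are hypothetical, not taken from
any existing provider code.

	#include <stdint.h>
	#include <rdma/qedr-abi.h>

	/* Hypothetical helper: map library-allocated buffers to the ureq. */
	static void example_fill_srq_ureq(struct qedr_create_srq_ureq *ureq,
					  void *srq_buf, uint64_t srq_buf_len,
					  void *prod_pair)
	{
		ureq->srq_addr       = (uintptr_t)srq_buf;   /* SRQ WQE ring */
		ureq->srq_len        = srq_buf_len;
		ureq->prod_pair_addr = (uintptr_t)prod_pair; /* rdma_srq_producers page */
	}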