RDMA/bnxt_re: Use correct sizing on buffers holding page DMA addresses
authorSelvin Xavier <selvin.xavier@broadcom.com>
Thu, 28 Mar 2019 16:49:43 +0000 (11:49 -0500)
committerJason Gunthorpe <jgg@mellanox.com>
Thu, 28 Mar 2019 17:13:27 +0000 (14:13 -0300)
umem->nmap is used to size the internal buffer that stores
page DMA addresses. This causes an out-of-bounds array access while iterating
the umem DMA-mapped SGL with umem page combining, because umem->nmap can be
less than the number of system pages in umem.

Use ib_umem_num_pages() instead of umem->nmap to size the page array.
Add a new structure (bnxt_qplib_sg_info) to pass sglist, npages and nmap.

Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/qplib_fp.c
drivers/infiniband/hw/bnxt_re/qplib_fp.h
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
drivers/infiniband/hw/bnxt_re/qplib_res.c
drivers/infiniband/hw/bnxt_re/qplib_res.h
drivers/infiniband/hw/bnxt_re/qplib_sp.c

index 071b2fc38b0bbf7f28ddf4a83d1b7b57c5924001..33b2a06c6dde116b80e524226d07f61b946185be 100644 (file)
@@ -895,8 +895,9 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
                return PTR_ERR(umem);
 
        qp->sumem = umem;
-       qplib_qp->sq.sglist = umem->sg_head.sgl;
-       qplib_qp->sq.nmap = umem->nmap;
+       qplib_qp->sq.sg_info.sglist = umem->sg_head.sgl;
+       qplib_qp->sq.sg_info.npages = ib_umem_num_pages(umem);
+       qplib_qp->sq.sg_info.nmap = umem->nmap;
        qplib_qp->qp_handle = ureq.qp_handle;
 
        if (!qp->qplib_qp.srq) {
@@ -907,8 +908,9 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
                if (IS_ERR(umem))
                        goto rqfail;
                qp->rumem = umem;
-               qplib_qp->rq.sglist = umem->sg_head.sgl;
-               qplib_qp->rq.nmap = umem->nmap;
+               qplib_qp->rq.sg_info.sglist = umem->sg_head.sgl;
+               qplib_qp->rq.sg_info.npages = ib_umem_num_pages(umem);
+               qplib_qp->rq.sg_info.nmap = umem->nmap;
        }
 
        qplib_qp->dpi = &cntx->dpi;
@@ -916,8 +918,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 rqfail:
        ib_umem_release(qp->sumem);
        qp->sumem = NULL;
-       qplib_qp->sq.sglist = NULL;
-       qplib_qp->sq.nmap = 0;
+       memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
 
        return PTR_ERR(umem);
 }
@@ -1374,8 +1375,9 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
                return PTR_ERR(umem);
 
        srq->umem = umem;
-       qplib_srq->nmap = umem->nmap;
-       qplib_srq->sglist = umem->sg_head.sgl;
+       qplib_srq->sg_info.sglist = umem->sg_head.sgl;
+       qplib_srq->sg_info.npages = ib_umem_num_pages(umem);
+       qplib_srq->sg_info.nmap = umem->nmap;
        qplib_srq->srq_handle = ureq.srq_handle;
        qplib_srq->dpi = &cntx->dpi;
 
@@ -2632,8 +2634,9 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
                        rc = PTR_ERR(cq->umem);
                        goto fail;
                }
-               cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
-               cq->qplib_cq.nmap = cq->umem->nmap;
+               cq->qplib_cq.sg_info.sglist = cq->umem->sg_head.sgl;
+               cq->qplib_cq.sg_info.npages = ib_umem_num_pages(cq->umem);
+               cq->qplib_cq.sg_info.nmap = cq->umem->nmap;
                cq->qplib_cq.dpi = &uctx->dpi;
        } else {
                cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
@@ -2645,8 +2648,6 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
                }
 
                cq->qplib_cq.dpi = &rdev->dpi_privileged;
-               cq->qplib_cq.sghead = NULL;
-               cq->qplib_cq.nmap = 0;
        }
        /*
         * Allocating the NQ in a round robin fashion. nq_alloc_cnt is a
index 71c34d5b0ac05f91bf7c7e5e1fbd4eb7957462fe..f034cab303f62d0044f5cc8067761f4a64fad2b7 100644 (file)
@@ -478,7 +478,7 @@ int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
            nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
                nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
        hwq_type = bnxt_qplib_get_hwq_type(nq->res);
-       if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, 0,
+       if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL,
                                      &nq->hwq.max_elements,
                                      BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
                                      PAGE_SIZE, hwq_type))
@@ -542,8 +542,8 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
        int rc, idx;
 
        srq->hwq.max_elements = srq->max_wqe;
-       rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, srq->sglist,
-                                      srq->nmap, &srq->hwq.max_elements,
+       rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, &srq->sg_info,
+                                      &srq->hwq.max_elements,
                                       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_QUEUE);
        if (rc)
@@ -742,7 +742,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 
        /* SQ */
        sq->hwq.max_elements = sq->max_wqe;
-       rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL, 0,
+       rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL,
                                       &sq->hwq.max_elements,
                                       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_QUEUE);
@@ -781,7 +781,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
        /* RQ */
        if (rq->max_wqe) {
                rq->hwq.max_elements = qp->rq.max_wqe;
-               rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL, 0,
+               rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL,
                                               &rq->hwq.max_elements,
                                               BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
                                               PAGE_SIZE, HWQ_TYPE_QUEUE);
@@ -890,8 +890,8 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
                         sizeof(struct sq_psn_search);
        }
        sq->hwq.max_elements = sq->max_wqe;
-       rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, sq->sglist,
-                                      sq->nmap, &sq->hwq.max_elements,
+       rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, &sq->sg_info,
+                                      &sq->hwq.max_elements,
                                       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
                                       psn_sz,
                                       PAGE_SIZE, HWQ_TYPE_QUEUE);
@@ -959,8 +959,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
        /* RQ */
        if (rq->max_wqe) {
                rq->hwq.max_elements = rq->max_wqe;
-               rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, rq->sglist,
-                                              rq->nmap, &rq->hwq.max_elements,
+               rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq,
+                                              &rq->sg_info,
+                                              &rq->hwq.max_elements,
                                               BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
                                               PAGE_SIZE, HWQ_TYPE_QUEUE);
                if (rc)
@@ -1030,7 +1031,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
                req_size = xrrq->max_elements *
                           BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
                req_size &= ~(PAGE_SIZE - 1);
-               rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
+               rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL,
                                               &xrrq->max_elements,
                                               BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
                                               0, req_size, HWQ_TYPE_CTX);
@@ -1046,7 +1047,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
                           BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
                req_size &= ~(PAGE_SIZE - 1);
 
-               rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
+               rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL,
                                               &xrrq->max_elements,
                                               BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
                                               0, req_size, HWQ_TYPE_CTX);
@@ -1935,8 +1936,8 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
        int rc;
 
        cq->hwq.max_elements = cq->max_wqe;
-       rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, cq->sghead,
-                                      cq->nmap, &cq->hwq.max_elements,
+       rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, &cq->sg_info,
+                                      &cq->hwq.max_elements,
                                       BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_QUEUE);
        if (rc)
index 3f618b5f1f062eae6721a47a8a32a0437491bb92..31436af2a4ecb5319c80235c13d94544e1d6dabf 100644 (file)
@@ -52,10 +52,9 @@ struct bnxt_qplib_srq {
        struct bnxt_qplib_cq            *cq;
        struct bnxt_qplib_hwq           hwq;
        struct bnxt_qplib_swq           *swq;
-       struct scatterlist              *sglist;
        int                             start_idx;
        int                             last_idx;
-       u32                             nmap;
+       struct bnxt_qplib_sg_info       sg_info;
        u16                             eventq_hw_ring_id;
        spinlock_t                      lock; /* protect SRQE link list */
 };
@@ -237,8 +236,7 @@ struct bnxt_qplib_swqe {
 struct bnxt_qplib_q {
        struct bnxt_qplib_hwq           hwq;
        struct bnxt_qplib_swq           *swq;
-       struct scatterlist              *sglist;
-       u32                             nmap;
+       struct bnxt_qplib_sg_info       sg_info;
        u32                             max_wqe;
        u16                             q_full_delta;
        u16                             max_sge;
@@ -381,8 +379,7 @@ struct bnxt_qplib_cq {
        u32                             cnq_hw_ring_id;
        struct bnxt_qplib_nq            *nq;
        bool                            resize_in_progress;
-       struct scatterlist              *sghead;
-       u32                             nmap;
+       struct bnxt_qplib_sg_info       sg_info;
        u64                             cq_handle;
 
 #define CQ_RESIZE_WAIT_TIME_MS         500
index c6461e957078829e0c96cff7edb9ae3edd593c39..48b04d2f175f908e984c12fda163237cf56876b5 100644 (file)
@@ -569,7 +569,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
        rcfw->pdev = pdev;
        rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
        hwq_type = bnxt_qplib_get_hwq_type(rcfw->res);
-       if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL, 0,
+       if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL,
                                      &rcfw->creq.max_elements,
                                      BNXT_QPLIB_CREQE_UNITS,
                                      0, PAGE_SIZE, hwq_type)) {
@@ -584,7 +584,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
 
        rcfw->cmdq.max_elements = rcfw->cmdq_depth;
        if (bnxt_qplib_alloc_init_hwq
-                       (rcfw->pdev, &rcfw->cmdq, NULL, 0,
+                       (rcfw->pdev, &rcfw->cmdq, NULL,
                         &rcfw->cmdq.max_elements,
                         BNXT_QPLIB_CMDQE_UNITS, 0,
                         bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth),
index 0bc24f934829aeb232bf8a6a9e690a793a92d9f6..37928b1111dfc403221983607bab90d2e3326163 100644 (file)
@@ -83,7 +83,8 @@ static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
 }
 
 static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
-                      struct scatterlist *sghead, u32 pages, u32 pg_size)
+                      struct scatterlist *sghead, u32 pages,
+                      u32 nmaps, u32 pg_size)
 {
        struct sg_dma_page_iter sg_iter;
        bool is_umem = false;
@@ -116,7 +117,7 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
        } else {
                i = 0;
                is_umem = true;
-               for_each_sg_dma_page (sghead, &sg_iter, pages, 0) {
+               for_each_sg_dma_page(sghead, &sg_iter, nmaps, 0) {
                        pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
                        pbl->pg_arr[i] = NULL;
                        pbl->pg_count++;
@@ -158,12 +159,13 @@ void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
 
 /* All HWQs are power of 2 in size */
 int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
-                             struct scatterlist *sghead, int nmap,
+                             struct bnxt_qplib_sg_info *sg_info,
                              u32 *elements, u32 element_size, u32 aux,
                              u32 pg_size, enum bnxt_qplib_hwq_type hwq_type)
 {
-       u32 pages, slots, size, aux_pages = 0, aux_size = 0;
+       u32 pages, maps, slots, size, aux_pages = 0, aux_size = 0;
        dma_addr_t *src_phys_ptr, **dst_virt_ptr;
+       struct scatterlist *sghead = NULL;
        int i, rc;
 
        hwq->level = PBL_LVL_MAX;
@@ -177,6 +179,9 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
        }
        size = roundup_pow_of_two(element_size);
 
+       if (sg_info)
+               sghead = sg_info->sglist;
+
        if (!sghead) {
                hwq->is_user = false;
                pages = (slots * size) / pg_size + aux_pages;
@@ -184,17 +189,20 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
                        pages++;
                if (!pages)
                        return -EINVAL;
+               maps = 0;
        } else {
                hwq->is_user = true;
-               pages = nmap;
+               pages = sg_info->npages;
+               maps = sg_info->nmap;
        }
 
        /* Alloc the 1st memory block; can be a PDL/PTL/PBL */
        if (sghead && (pages == MAX_PBL_LVL_0_PGS))
                rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead,
-                                pages, pg_size);
+                                pages, maps, pg_size);
        else
-               rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL, 1, pg_size);
+               rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL,
+                                1, 0, pg_size);
        if (rc)
                goto fail;
 
@@ -204,7 +212,8 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
                if (pages > MAX_PBL_LVL_1_PGS) {
                        /* 2 levels of indirection */
                        rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL,
-                                        MAX_PBL_LVL_1_PGS_FOR_LVL_2, pg_size);
+                                        MAX_PBL_LVL_1_PGS_FOR_LVL_2,
+                                        0, pg_size);
                        if (rc)
                                goto fail;
                        /* Fill in lvl0 PBL */
@@ -217,7 +226,7 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
                        hwq->level = PBL_LVL_1;
 
                        rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead,
-                                        pages, pg_size);
+                                        pages, maps, pg_size);
                        if (rc)
                                goto fail;
 
@@ -246,7 +255,7 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
 
                        /* 1 level of indirection */
                        rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead,
-                                        pages, pg_size);
+                                        pages, maps, pg_size);
                        if (rc)
                                goto fail;
                        /* Fill in lvl0 PBL */
@@ -339,7 +348,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
 
        /* QPC Tables */
        ctx->qpc_tbl.max_elements = ctx->qpc_count;
-       rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL, 0,
+       rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL,
                                       &ctx->qpc_tbl.max_elements,
                                       BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
@@ -348,7 +357,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
 
        /* MRW Tables */
        ctx->mrw_tbl.max_elements = ctx->mrw_count;
-       rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL, 0,
+       rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL,
                                       &ctx->mrw_tbl.max_elements,
                                       BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
@@ -357,7 +366,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
 
        /* SRQ Tables */
        ctx->srqc_tbl.max_elements = ctx->srqc_count;
-       rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL, 0,
+       rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL,
                                       &ctx->srqc_tbl.max_elements,
                                       BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
@@ -366,7 +375,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
 
        /* CQ Tables */
        ctx->cq_tbl.max_elements = ctx->cq_count;
-       rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL, 0,
+       rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL,
                                       &ctx->cq_tbl.max_elements,
                                       BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
@@ -375,7 +384,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
 
        /* TQM Buffer */
        ctx->tqm_pde.max_elements = 512;
-       rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL, 0,
+       rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL,
                                       &ctx->tqm_pde.max_elements, sizeof(u64),
                                       0, PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
@@ -386,7 +395,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
                        continue;
                ctx->tqm_tbl[i].max_elements = ctx->qpc_count *
                                               ctx->tqm_count[i];
-               rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL, 0,
+               rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL,
                                               &ctx->tqm_tbl[i].max_elements, 1,
                                               0, PAGE_SIZE, HWQ_TYPE_CTX);
                if (rc)
@@ -424,7 +433,7 @@ int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
 
        /* TIM Buffer */
        ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
-       rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL, 0,
+       rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL,
                                       &ctx->tim_tbl.max_elements, 1,
                                       0, PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
index 32cebd0f1436a6a41a224194c0aa03f3747e0398..30c42c92fac72fc91752cb005015bcccdf48d4cf 100644 (file)
@@ -219,6 +219,12 @@ static inline u8 bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx *cctx)
               RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL;
 }
 
+struct bnxt_qplib_sg_info {
+       struct scatterlist              *sglist;
+       u32                             nmap;
+       u32                             npages;
+};
+
 #define to_bnxt_qplib(ptr, type, member)       \
        container_of(ptr, type, member)
 
@@ -227,7 +233,7 @@ struct bnxt_qplib_dev_attr;
 
 void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq);
 int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
-                             struct scatterlist *sl, int nmap, u32 *elements,
+                             struct bnxt_qplib_sg_info *sg_info, u32 *elements,
                              u32 elements_per_page, u32 aux, u32 pg_size,
                              enum bnxt_qplib_hwq_type hwq_type);
 void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid);
index e9c53e4064048508761f1a74fee81fe5f668bb57..ef1938733a418241cac3f65946cf1508624237af 100644 (file)
@@ -684,7 +684,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
 
                mr->hwq.max_elements = pages;
                /* Use system PAGE_SIZE */
-               rc = bnxt_qplib_alloc_init_hwq(res->pdev, &mr->hwq, NULL, 0,
+               rc = bnxt_qplib_alloc_init_hwq(res->pdev, &mr->hwq, NULL,
                                               &mr->hwq.max_elements,
                                               PAGE_SIZE, 0, PAGE_SIZE,
                                               HWQ_TYPE_CTX);
@@ -754,7 +754,7 @@ int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
                return -ENOMEM;
 
        frpl->hwq.max_elements = pages;
-       rc = bnxt_qplib_alloc_init_hwq(res->pdev, &frpl->hwq, NULL, 0,
+       rc = bnxt_qplib_alloc_init_hwq(res->pdev, &frpl->hwq, NULL,
                                       &frpl->hwq.max_elements, PAGE_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
        if (!rc)