RDMA/bnxt_re: Use for_each_sg_dma_page iterator on umem SGL
author	Shiraz, Saleem <shiraz.saleem@intel.com>
Mon, 11 Feb 2019 15:24:57 +0000 (09:24 -0600)
committer	Jason Gunthorpe <jgg@mellanox.com>
Mon, 11 Feb 2019 22:02:33 +0000 (15:02 -0700)
Use the for_each_sg_dma_page iterator variant to walk the umem DMA-mapped
SGL and get the page DMA address. This avoids the extra inner loop that is
otherwise needed to step through the pages of each SGE when the for_each_sg
iterator is used.
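
In outline, the conversion replaces a nested walk with a single loop. A
minimal sketch of the two patterns (use_dma_addr() is a hypothetical
stand-in for whatever the caller does with each DMA address):

	/* Before: walk SGEs, then step through the pages of each SGE by hand. */
	struct scatterlist *sg;
	int entry, i;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		int pages = sg_dma_len(sg) >> PAGE_SHIFT;

		for (i = 0; i < pages; i++)
			use_dma_addr(sg_dma_address(sg) + (i << PAGE_SHIFT));
	}

	/* After: for_each_sg_dma_page() yields one PAGE_SIZE block per step. */
	struct sg_dma_page_iter sg_iter;

	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0)
		use_dma_addr(sg_page_iter_dma_address(&sg_iter));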

Additionally, purge umem->page_shift usage in the driver, as it is only
relevant for ODP MRs. Use the system page size and shift instead.

Signed-off-by: Shiraz, Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/qplib_res.c

index 1606571af63d6be1e53e31168742ba6902db0849..bff9320a968ed18294b90d306613e40cfe2b4ed2 100644 (file)
@@ -3553,19 +3553,14 @@ static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
        u64 *pbl_tbl = pbl_tbl_orig;
        u64 paddr;
        u64 page_mask = (1ULL << page_shift) - 1;
-       int i, pages;
-       struct scatterlist *sg;
-       int entry;
-
-       for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-               pages = sg_dma_len(sg) >> PAGE_SHIFT;
-               for (i = 0; i < pages; i++) {
-                       paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
-                       if (pbl_tbl == pbl_tbl_orig)
-                               *pbl_tbl++ = paddr & ~page_mask;
-                       else if ((paddr & page_mask) == 0)
-                               *pbl_tbl++ = paddr;
-               }
+       struct sg_dma_page_iter sg_iter;
+
+       for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+               paddr = sg_page_iter_dma_address(&sg_iter);
+               if (pbl_tbl == pbl_tbl_orig)
+                       *pbl_tbl++ = paddr & ~page_mask;
+               else if ((paddr & page_mask) == 0)
+                       *pbl_tbl++ = paddr;
        }
        return pbl_tbl - pbl_tbl_orig;
 }
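
With page_shift == PAGE_SHIFT, every address the iterator yields is aligned,
so each step emits one PBL entry. The masking logic above also lets the
PAGE_SIZE-granular walk fill a PBL with larger HW pages. A worked sketch,
assuming a hypothetical 64K HW page (page_shift = 16) and a contiguous 128K
region at DMA address 0x80000000:

	u64 page_mask = (1ULL << 16) - 1;	/* 0xffff */

	/* The iterator yields 0x80000000, 0x80001000, ... in 4K steps.
	 * 0x80000000: first entry, stored as 0x80000000 & ~page_mask.
	 * 0x80001000 .. 0x8000f000: not 64K-aligned, skipped.
	 * 0x80010000: 64K-aligned, stored as the second entry.
	 * Result: two PBL entries covering 128K, one per 64K HW page.
	 */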
@@ -3628,7 +3623,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
                goto free_umem;
        }
 
-       page_shift = umem->page_shift;
+       page_shift = PAGE_SHIFT;
 
        if (!bnxt_re_page_size_ok(page_shift)) {
                dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
index c8502c2844a273a2ef1b5c44902c1561431779a4..d08b9d9948fd3c9372b66e2fd75c3114505291d9 100644 (file)
@@ -85,7 +85,7 @@ static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
 static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
                       struct scatterlist *sghead, u32 pages, u32 pg_size)
 {
-       struct scatterlist *sg;
+       struct sg_dma_page_iter sg_iter;
        bool is_umem = false;
        int i;
 
@@ -116,12 +116,10 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
        } else {
                i = 0;
                is_umem = true;
-               for_each_sg(sghead, sg, pages, i) {
-                       pbl->pg_map_arr[i] = sg_dma_address(sg);
-                       pbl->pg_arr[i] = sg_virt(sg);
-                       if (!pbl->pg_arr[i])
-                               goto fail;
-
+               for_each_sg_dma_page (sghead, &sg_iter, pages, 0) {
+                       pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
+                       pbl->pg_arr[i] = NULL;
+                       i++;
                        pbl->pg_count++;
                }
        }