RDMA/core: Fix umem iterator when PAGE_SIZE is greater than HCA pgsz
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fb1a2d6b196900d0b98ca454c4dbd2c97bfb9142..b7b6b58dd3486d98e5d641149b69dcf13e694292 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2850,6 +2850,7 @@ struct ib_block_iter {
        /* internal states */
        struct scatterlist *__sg;       /* sg holding the current aligned block */
        dma_addr_t __dma_addr;          /* unaligned DMA address of this block */
+       size_t __sg_numblocks;          /* ib_umem_num_dma_blocks() */
        unsigned int __sg_nents;        /* number of SG entries */
        unsigned int __sg_advance;      /* number of bytes to advance in sg in next step */
        unsigned int __pg_bit;          /* alignment of current block */
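
For illustration only, here is a minimal sketch (not the literal patch body) of how a per-iterator block count can bound the walk: when PAGE_SIZE exceeds the HCA page size, the final scatterlist entry may cover more device-sized blocks than ib_umem_num_dma_blocks() reports for the MR, so the iterator should stop on the counter rather than on the scatterlist length alone. The helper name block_iter_next_sketch is hypothetical; the field names are the ones in the struct above, and the advance logic is a simplified approximation of __rdma_block_iter_next().

	/*
	 * Sketch of a block-iterator step bounded by __sg_numblocks, which is
	 * assumed to have been initialized to ib_umem_num_dma_blocks() when
	 * the iterator was started.
	 */
	static bool block_iter_next_sketch(struct ib_block_iter *biter)
	{
		unsigned int block_offset;

		/* stop once every DMA block of the MR has been produced */
		if (!biter->__sg_nents || !biter->__sg || !biter->__sg_numblocks)
			return false;

		biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
		block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
		biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;

		/* current SG entry exhausted: step to the next one */
		if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
			biter->__sg_advance = 0;
			biter->__sg = sg_next(biter->__sg);
			biter->__sg_nents--;
		}

		/* the block counter, not the SG length, now bounds the walk */
		biter->__sg_numblocks--;
		return true;
	}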