Commit | Line | Data |
---|---|---|
6bf9d8f6 | 1 | /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ |
f7c6a7b5 RD |
2 | /* |
3 | * Copyright (c) 2007 Cisco Systems. All rights reserved. | |
368c0159 | 4 | * Copyright (c) 2020 Intel Corporation. All rights reserved. |
f7c6a7b5 RD |
5 | */ |
6 | ||
7 | #ifndef IB_UMEM_H | |
8 | #define IB_UMEM_H | |
9 | ||
10 | #include <linux/list.h> | |
11 | #include <linux/scatterlist.h> | |
e8edc6e0 | 12 | #include <linux/workqueue.h> |
b0ea0fa5 | 13 | #include <rdma/ib_verbs.h> |
f7c6a7b5 RD |
14 | |
15 | struct ib_ucontext; | |
8ada2c1c | 16 | struct ib_umem_odp; |
368c0159 | 17 | struct dma_buf_attach_ops; |
f7c6a7b5 RD |
18 | |
/*
 * struct ib_umem - a region of userspace memory made available for device DMA.
 *
 * Created by ib_umem_get() and destroyed by ib_umem_release().
 */
struct ib_umem {
	struct ib_device *ibdev;	/* device this umem belongs to */
	struct mm_struct *owning_mm;	/* mm owning the user pages */
	u64 iova;			/* device address used for DMA block math */
	size_t length;			/* length of the region in bytes */
	unsigned long address;		/* userspace start address */
	u32 writable : 1;		/* device may write to the region */
	u32 is_odp : 1;			/* really a struct ib_umem_odp */
	u32 is_dmabuf : 1;		/* really a struct ib_umem_dmabuf */
	struct work_struct work;
	struct sg_append_table sgt_append; /* scatter/gather of the pages */
};
31 | ||
/*
 * struct ib_umem_dmabuf - a umem backed by a dma-buf rather than directly
 * pinned user pages. Recover it from a umem with is_dmabuf set via
 * to_ib_umem_dmabuf().
 */
struct ib_umem_dmabuf {
	struct ib_umem umem;			/* embedded base umem */
	struct dma_buf_attachment *attach;	/* attachment to the dma-buf */
	struct sg_table *sgt;			/* mapping from the dma-buf */
	struct scatterlist *first_sg;		/* first entry used by the umem */
	struct scatterlist *last_sg;		/* last entry used by the umem */
	unsigned long first_sg_offset;		/* bytes skipped in first_sg */
	unsigned long last_sg_trim;		/* bytes trimmed off last_sg */
	void *private;				/* owner-private data */
	u8 pinned : 1;			/* presumably set by ib_umem_dmabuf_get_pinned() — confirm in core */
};
43 | ||
44 | static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem) | |
45 | { | |
46 | return container_of(umem, struct ib_umem_dmabuf, umem); | |
47 | } | |
48 | ||
/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	/* Low bits of the userspace address, i.e. the in-page offset. */
	return umem->address & ~PAGE_MASK;
}
54 | ||
b045db62 JG |
55 | static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem, |
56 | unsigned long pgsz) | |
57 | { | |
79fbd3e1 | 58 | return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) & |
b045db62 JG |
59 | (pgsz - 1); |
60 | } | |
61 | ||
a665aca8 JG |
62 | static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem, |
63 | unsigned long pgsz) | |
64 | { | |
65 | return (size_t)((ALIGN(umem->iova + umem->length, pgsz) - | |
66 | ALIGN_DOWN(umem->iova, pgsz))) / | |
67 | pgsz; | |
68 | } | |
69 | ||
/* Number of PAGE_SIZE blocks covering the umem. */
static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}
74 | ||
ebc24096 JG |
75 | static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter, |
76 | struct ib_umem *umem, | |
77 | unsigned long pgsz) | |
78 | { | |
79fbd3e1 MG |
79 | __rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl, |
80 | umem->sgt_append.sgt.nents, pgsz); | |
ebc24096 JG |
81 | } |
82 | ||
/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the current position
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_block_iter_next(biter);)
97 | ||
#ifdef CONFIG_INFINIBAND_USER_MEM

/* Pin @size bytes of userspace memory at @addr for DMA by @device. */
struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
/* Release a umem obtained from ib_umem_get(). */
void ib_umem_release(struct ib_umem *umem);
/* Copy @length bytes starting at @offset from the umem into @dst. */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
/*
 * Pick the best HW page size out of @pgsz_bitmap for the umem mapped at
 * virtual address @virt; returns 0 when no supported size fits.
 */
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);
368c0159 | 108 | |
b045db62 JG |
109 | /** |
110 | * ib_umem_find_best_pgoff - Find best HW page size | |
111 | * | |
112 | * @umem: umem struct | |
113 | * @pgsz_bitmap bitmap of HW supported page sizes | |
114 | * @pgoff_bitmask: Mask of bits that can be represented with an offset | |
115 | * | |
116 | * This is very similar to ib_umem_find_best_pgsz() except instead of accepting | |
117 | * an IOVA it accepts a bitmask specifying what address bits can be represented | |
118 | * with a page offset. | |
119 | * | |
120 | * For instance if the HW has multiple page sizes, requires 64 byte alignemnt, | |
121 | * and can support aligned offsets up to 4032 then pgoff_bitmask would be | |
122 | * "111111000000". | |
123 | * | |
124 | * If the pgoff_bitmask requires either alignment in the low bit or an | |
125 | * unavailable page size for the high bits, this function returns 0. | |
126 | */ | |
127 | static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem, | |
128 | unsigned long pgsz_bitmap, | |
129 | u64 pgoff_bitmask) | |
130 | { | |
79fbd3e1 | 131 | struct scatterlist *sg = umem->sgt_append.sgt.sgl; |
b045db62 JG |
132 | dma_addr_t dma_addr; |
133 | ||
134 | dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK); | |
135 | return ib_umem_find_best_pgsz(umem, pgsz_bitmap, | |
136 | dma_addr & pgoff_bitmask); | |
137 | } | |
f7c6a7b5 | 138 | |
/* Create a umem backed by the dma-buf in @fd instead of pinned user pages. */
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
/*
 * Like ib_umem_dmabuf_get() but without caller-supplied attach ops;
 * presumably keeps the dma-buf pinned (see the 'pinned' flag) — confirm
 * against the core implementation.
 */
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access);
/* Map/unmap the dma-buf pages into the umem's scatterlist. */
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
150 | ||
f7c6a7b5 RD |
151 | #else /* CONFIG_INFINIBAND_USER_MEM */ |
152 | ||
153 | #include <linux/err.h> | |
154 | ||
c320e527 | 155 | static inline struct ib_umem *ib_umem_get(struct ib_device *device, |
f7c6a7b5 | 156 | unsigned long addr, size_t size, |
72b894b0 | 157 | int access) |
b0ea0fa5 | 158 | { |
368c0159 | 159 | return ERR_PTR(-EOPNOTSUPP); |
f7c6a7b5 RD |
160 | } |
161 | static inline void ib_umem_release(struct ib_umem *umem) { } | |
/* Stub for CONFIG_INFINIBAND_USER_MEM=n builds. */
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      	       	   size_t length) {
	return -EOPNOTSUPP;
}
/* Stub: 0 signals "no usable page size", matching the real function. */
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}
/* Stub: 0 signals "no usable page size", matching the real function. */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	return 0;
}
368c0159 JX |
178 | static inline |
179 | struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device, | |
180 | unsigned long offset, | |
181 | size_t size, int fd, | |
182 | int access, | |
183 | struct dma_buf_attach_ops *ops) | |
184 | { | |
185 | return ERR_PTR(-EOPNOTSUPP); | |
186 | } | |
/* Stub for CONFIG_INFINIBAND_USER_MEM=n builds. */
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
			  size_t size, int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
/* Stub for CONFIG_INFINIBAND_USER_MEM=n builds. */
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
197 | static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { } | |
198 | static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { } | |
4a353399 | 199 | |
f7c6a7b5 | 200 | #endif /* CONFIG_INFINIBAND_USER_MEM */ |
f7c6a7b5 | 201 | #endif /* IB_UMEM_H */ |