// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 HGST, a Western Digital Company.
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/pci-p2pdma.h>
#include <rdma/mr_pool.h>
#include <rdma/rw.h>

enum {
	RDMA_RW_SINGLE_WR,
	RDMA_RW_MULTI_WR,
	RDMA_RW_MR,
	RDMA_RW_SIG_MR,
};

static bool rdma_rw_force_mr;
module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");

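/*
 * Example: rw.c is normally built into the ib_core module, so the knob above
 * can be flipped for testing with "ib_core.force_mr=1" on the kernel command
 * line (module name assumed; adjust if rw.c is built elsewhere).
 */
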
/*
 * Report whether memory registration should be used. Memory registration must
 * be used for iWARP devices because of iWARP-specific limitations. Memory
 * registration is also enabled if registering memory might yield better
 * performance than using multiple SGE entries, see rdma_rw_io_needs_mr().
 */
static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
{
	if (rdma_protocol_iwarp(dev, port_num))
		return true;
	if (dev->attrs.max_sgl_rd)
		return true;
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}

/*
 * Check if the device will use memory registration for this RW operation.
 * For RDMA READs we must use MRs on iWARP and can optionally use them as an
 * optimization otherwise. Additionally we have a debug option to force usage
 * of MRs to help testing this code path.
 */
static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
		enum dma_data_direction dir, int dma_nents)
{
	if (dir == DMA_FROM_DEVICE) {
		if (rdma_protocol_iwarp(dev, port_num))
			return true;
		if (dev->attrs.max_sgl_rd && dma_nents > dev->attrs.max_sgl_rd)
			return true;
	}
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}
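
/*
 * For example, a device reporting attrs.max_sgl_rd == 16 (an illustrative
 * value) would take the MR path for a READ spanning 32 DMA entries, but
 * would issue a 4-entry READ as plain SGEs.
 */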

static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev,
		bool pi_support)
{
	u32 max_pages;

	if (pi_support)
		max_pages = dev->attrs.max_pi_fast_reg_page_list_len;
	else
		max_pages = dev->attrs.max_fast_reg_page_list_len;

	/* arbitrary limit to avoid allocating gigantic resources */
	return min_t(u32, max_pages, 256);
}

static inline int rdma_rw_inv_key(struct rdma_rw_reg_ctx *reg)
{
	int count = 0;

	if (reg->mr->need_inval) {
		reg->inv_wr.opcode = IB_WR_LOCAL_INV;
		reg->inv_wr.ex.invalidate_rkey = reg->mr->lkey;
		reg->inv_wr.next = &reg->reg_wr.wr;
		count++;
	} else {
		reg->inv_wr.next = NULL;
	}

	return count;
}

/* Caller must have zero-initialized *reg. */
static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
		struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
		u32 sg_cnt, u32 offset)
{
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
						    qp->integrity_en);
	u32 nents = min(sg_cnt, pages_per_mr);
	int count = 0, ret;

	reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
	if (!reg->mr)
		return -EAGAIN;

	count += rdma_rw_inv_key(reg);

	ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
	if (ret < 0 || ret < nents) {
		ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
		return -EINVAL;
	}

	reg->reg_wr.wr.opcode = IB_WR_REG_MR;
	reg->reg_wr.mr = reg->mr;
	reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
	if (rdma_protocol_iwarp(qp->device, port_num))
		reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
	count++;

	reg->sge.addr = reg->mr->iova;
	reg->sge.length = reg->mr->length;
	return count;
}

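/*
 * Build one registration context per pages_per_mr-sized chunk of the
 * scatterlist and chain the work requests, per chunk, as:
 *
 *   [LOCAL_INV] -> REG_MR -> RDMA_READ/WRITE -> (next chunk's chain ...)
 *
 * where the LOCAL_INV is only present when the MR still needs to be
 * invalidated from a previous use.
 */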
static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct rdma_rw_reg_ctx *prev = NULL;
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
						    qp->integrity_en);
	int i, j, ret = 0, count = 0;

	ctx->nr_ops = DIV_ROUND_UP(sg_cnt, pages_per_mr);
	ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
	if (!ctx->reg) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ctx->nr_ops; i++) {
		struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
		u32 nents = min(sg_cnt, pages_per_mr);

		ret = rdma_rw_init_one_mr(qp, port_num, reg, sg, sg_cnt,
				offset);
		if (ret < 0)
			goto out_free;
		count += ret;

		if (prev) {
			if (reg->mr->need_inval)
				prev->wr.wr.next = &reg->inv_wr;
			else
				prev->wr.wr.next = &reg->reg_wr.wr;
		}

		reg->reg_wr.wr.next = &reg->wr.wr;

		reg->wr.wr.sg_list = &reg->sge;
		reg->wr.wr.num_sge = 1;
		reg->wr.remote_addr = remote_addr;
		reg->wr.rkey = rkey;
		if (dir == DMA_TO_DEVICE) {
			reg->wr.wr.opcode = IB_WR_RDMA_WRITE;
		} else if (!rdma_cap_read_inv(qp->device, port_num)) {
			reg->wr.wr.opcode = IB_WR_RDMA_READ;
		} else {
			reg->wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
			reg->wr.wr.ex.invalidate_rkey = reg->mr->lkey;
		}
		count++;

		remote_addr += reg->sge.length;
		sg_cnt -= nents;
		for (j = 0; j < nents; j++)
			sg = sg_next(sg);
		prev = reg;
		offset = 0;
	}

	if (prev)
		prev->wr.wr.next = NULL;

	ctx->type = RDMA_RW_MR;
	return count;

out_free:
	while (--i >= 0)
		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
	kfree(ctx->reg);
out:
	return ret;
}

static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
		      qp->max_read_sge;
	struct ib_sge *sge;
	u32 total_len = 0, i, j;

	ctx->nr_ops = DIV_ROUND_UP(sg_cnt, max_sge);

	ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);
	if (!ctx->map.sges)
		goto out;

	ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL);
	if (!ctx->map.wrs)
		goto out_free_sges;

	for (i = 0; i < ctx->nr_ops; i++) {
		struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
		u32 nr_sge = min(sg_cnt, max_sge);

		if (dir == DMA_TO_DEVICE)
			rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
		else
			rdma_wr->wr.opcode = IB_WR_RDMA_READ;
		rdma_wr->remote_addr = remote_addr + total_len;
		rdma_wr->rkey = rkey;
		rdma_wr->wr.num_sge = nr_sge;
		rdma_wr->wr.sg_list = sge;

		for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
			sge->addr = sg_dma_address(sg) + offset;
			sge->length = sg_dma_len(sg) - offset;
			sge->lkey = qp->pd->local_dma_lkey;

			total_len += sge->length;
			sge++;
			sg_cnt--;
			offset = 0;
		}

		rdma_wr->wr.next = i + 1 < ctx->nr_ops ?
			&ctx->map.wrs[i + 1].wr : NULL;
	}

	ctx->type = RDMA_RW_MULTI_WR;
	return ctx->nr_ops;

out_free_sges:
	kfree(ctx->map.sges);
out:
	return -ENOMEM;
}
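
/*
 * E.g. with qp->max_write_sge == 30 (a hypothetical value), a 64-entry
 * scatterlist is split by the function above into three chained WRITE WRs
 * carrying 30, 30 and 4 SGEs respectively.
 */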

static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
		enum dma_data_direction dir)
{
	struct ib_rdma_wr *rdma_wr = &ctx->single.wr;

	ctx->nr_ops = 1;

	ctx->single.sge.lkey = qp->pd->local_dma_lkey;
	ctx->single.sge.addr = sg_dma_address(sg) + offset;
	ctx->single.sge.length = sg_dma_len(sg) - offset;

	memset(rdma_wr, 0, sizeof(*rdma_wr));
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	rdma_wr->wr.sg_list = &ctx->single.sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;

	ctx->type = RDMA_RW_SINGLE_WR;
	return 1;
}

static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
		u32 sg_cnt, enum dma_data_direction dir)
{
	if (is_pci_p2pdma_page(sg_page(sg)))
		pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir);
	else
		ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
}

static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
		u32 sg_cnt, enum dma_data_direction dir)
{
	if (is_pci_p2pdma_page(sg_page(sg)))
		return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
	return ib_dma_map_sg(dev, sg, sg_cnt, dir);
}

/**
 * rdma_rw_ctx_init - initialize an RDMA READ/WRITE context
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @sg_offset:	current byte offset into @sg
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the work queue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	int ret;

	ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);
	if (!ret)
		return -ENOMEM;
	sg_cnt = ret;

	/*
	 * Skip to the S/G entry that sg_offset falls into:
	 */
	for (;;) {
		u32 len = sg_dma_len(sg);

		if (sg_offset < len)
			break;

		sg = sg_next(sg);
		sg_offset -= len;
		sg_cnt--;
	}

	ret = -EIO;
	if (WARN_ON_ONCE(sg_cnt == 0))
		goto out_unmap_sg;

	if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
		ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
				sg_offset, remote_addr, rkey, dir);
	} else if (sg_cnt > 1) {
		ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
				remote_addr, rkey, dir);
	} else {
		ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
				remote_addr, rkey, dir);
	}

	if (ret < 0)
		goto out_unmap_sg;
	return ret;

out_unmap_sg:
	rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
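
/*
 * Usage sketch (all example_* names are illustrative; see the NVMe or SRP
 * targets for real consumers): a caller typically embeds the context and a
 * CQE in a per-I/O structure, posts the chain, and tears everything down
 * from the completion handler.
 */
struct example_io {
	struct rdma_rw_ctx	ctx;
	struct ib_cqe		cqe;
	struct scatterlist	*sg;
	u32			sg_cnt;
	u8			port_num;
};

static void example_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct example_io *io =
		container_of(wc->wr_cqe, struct example_io, cqe);

	/* Release the MRs/WRs and unmap the scatterlist. */
	rdma_rw_ctx_destroy(&io->ctx, wc->qp, io->port_num, io->sg,
			io->sg_cnt, DMA_FROM_DEVICE);
	/* ... complete the upper-layer I/O here ... */
}

static int example_post_read(struct ib_qp *qp, struct example_io *io,
		u64 remote_addr, u32 rkey)
{
	int ret;

	ret = rdma_rw_ctx_init(&io->ctx, qp, io->port_num, io->sg,
			io->sg_cnt, 0, remote_addr, rkey, DMA_FROM_DEVICE);
	if (ret < 0)
		return ret;

	io->cqe.done = example_read_done;
	return rdma_rw_ctx_post(&io->ctx, qp, io->port_num, &io->cqe, NULL);
}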

/**
 * rdma_rw_ctx_signature_init - initialize an RW context with signature offload
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @prot_sg:	scatterlist to READ/WRITE protection information from/to
 * @prot_sg_cnt: number of entries in @prot_sg
 * @sig_attrs:	signature offloading algorithms
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the work queue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		struct ib_sig_attrs *sig_attrs,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
						    qp->integrity_en);
	struct ib_rdma_wr *rdma_wr;
	int count = 0, ret;

	if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
		pr_err("SG count too large: sg_cnt=%d, prot_sg_cnt=%d, pages_per_mr=%d\n",
		       sg_cnt, prot_sg_cnt, pages_per_mr);
		return -EINVAL;
	}

	ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);
	if (!ret)
		return -ENOMEM;
	sg_cnt = ret;

	if (prot_sg_cnt) {
		ret = rdma_rw_map_sg(dev, prot_sg, prot_sg_cnt, dir);
		if (!ret) {
			ret = -ENOMEM;
			goto out_unmap_sg;
		}
		prot_sg_cnt = ret;
	}

	ctx->type = RDMA_RW_SIG_MR;
	ctx->nr_ops = 1;
	ctx->reg = kcalloc(1, sizeof(*ctx->reg), GFP_KERNEL);
	if (!ctx->reg) {
		ret = -ENOMEM;
		goto out_unmap_prot_sg;
	}

	ctx->reg->mr = ib_mr_pool_get(qp, &qp->sig_mrs);
	if (!ctx->reg->mr) {
		ret = -EAGAIN;
		goto out_free_ctx;
	}

	count += rdma_rw_inv_key(ctx->reg);

	memcpy(ctx->reg->mr->sig_attrs, sig_attrs, sizeof(struct ib_sig_attrs));

	ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sg_cnt, NULL, prot_sg,
			      prot_sg_cnt, NULL, SZ_4K);
	if (unlikely(ret)) {
		pr_err("failed to map PI sg (%d)\n", sg_cnt + prot_sg_cnt);
		goto out_destroy_sig_mr;
	}

	ctx->reg->reg_wr.wr.opcode = IB_WR_REG_MR_INTEGRITY;
	ctx->reg->reg_wr.wr.wr_cqe = NULL;
	ctx->reg->reg_wr.wr.num_sge = 0;
	ctx->reg->reg_wr.wr.send_flags = 0;
	ctx->reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
	if (rdma_protocol_iwarp(qp->device, port_num))
		ctx->reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
	ctx->reg->reg_wr.mr = ctx->reg->mr;
	ctx->reg->reg_wr.key = ctx->reg->mr->lkey;
	count++;

	ctx->reg->sge.addr = ctx->reg->mr->iova;
	ctx->reg->sge.length = ctx->reg->mr->length;
	if (sig_attrs->wire.sig_type == IB_SIG_TYPE_NONE)
		ctx->reg->sge.length -= ctx->reg->mr->sig_attrs->meta_length;

	rdma_wr = &ctx->reg->wr;
	rdma_wr->wr.sg_list = &ctx->reg->sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	ctx->reg->reg_wr.wr.next = &rdma_wr->wr;
	count++;

	return count;

out_destroy_sig_mr:
	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
out_free_ctx:
	kfree(ctx->reg);
out_unmap_prot_sg:
	if (prot_sg_cnt)
		rdma_rw_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
out_unmap_sg:
	rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
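
/*
 * Sketch: a caller fills a struct ib_sig_attrs (e.g. T10-DIF parameters
 * derived from the SCSI command, as iSER does) and passes a second
 * scatterlist holding the protection information:
 *
 *	ret = rdma_rw_ctx_signature_init(&ctx, qp, port_num, sg, sg_cnt,
 *			prot_sg, prot_sg_cnt, &sig_attrs,
 *			remote_addr, rkey, DMA_FROM_DEVICE);
 *
 * Posting then uses rdma_rw_ctx_post() as usual, and teardown goes through
 * rdma_rw_ctx_destroy_signature() instead of rdma_rw_ctx_destroy().
 */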

/*
 * Now that we are going to post the WRs we can update the lkey and need_inval
 * state on the MRs. If we were doing this at init time, we would get double
 * or missing invalidations if a context was initialized but not actually
 * posted.
 */
static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
{
	reg->mr->need_inval = need_inval;
	ib_update_fast_reg_key(reg->mr, ib_inc_rkey(reg->mr->lkey));
	reg->reg_wr.key = reg->mr->lkey;
	reg->sge.lkey = reg->mr->lkey;
}

/**
 * rdma_rw_ctx_wrs - return chain of WRs for an RDMA READ or WRITE operation
 * @ctx:	context to operate on
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @cqe:	completion queue entry for the last WR
 * @chain_wr:	WR to append to the posted chain
 *
 * Return the WR chain for the set of RDMA READ/WRITE operations described by
 * @ctx, as well as any memory registration operations needed. If @chain_wr
 * is non-NULL the WR it points to will be appended to the chain of WRs posted.
 * If @chain_wr is not set @cqe must be set so that the caller gets a
 * completion notification.
 */
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr, *last_wr;
	int i;

	switch (ctx->type) {
	case RDMA_RW_SIG_MR:
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++) {
			rdma_rw_update_lkey(&ctx->reg[i],
				ctx->reg[i].wr.wr.opcode !=
					IB_WR_RDMA_READ_WITH_INV);
		}

		if (ctx->reg[0].inv_wr.next)
			first_wr = &ctx->reg[0].inv_wr;
		else
			first_wr = &ctx->reg[0].reg_wr.wr;
		last_wr = &ctx->reg[ctx->nr_ops - 1].wr.wr;
		break;
	case RDMA_RW_MULTI_WR:
		first_wr = &ctx->map.wrs[0].wr;
		last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr;
		break;
	case RDMA_RW_SINGLE_WR:
		first_wr = &ctx->single.wr.wr;
		last_wr = &ctx->single.wr.wr;
		break;
	default:
		BUG();
	}

	if (chain_wr) {
		last_wr->next = chain_wr;
	} else {
		last_wr->wr_cqe = cqe;
		last_wr->send_flags |= IB_SEND_SIGNALED;
	}

	return first_wr;
}
EXPORT_SYMBOL(rdma_rw_ctx_wrs);
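
/*
 * Sketch: chaining a response SEND behind the RDMA WRITEs so that a single
 * ib_post_send() call covers the whole exchange; the completion is then
 * requested on the chained WR rather than on the RW context itself. The
 * rsp_wr argument is an illustrative caller-built SEND WR.
 */
static int example_write_and_respond(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct ib_send_wr *rsp_wr)
{
	struct ib_send_wr *first_wr;

	first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, NULL, rsp_wr);
	return ib_post_send(qp, first_wr, NULL);
}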

/**
 * rdma_rw_ctx_post - post an RDMA READ or RDMA WRITE operation
 * @ctx:	context to operate on
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @cqe:	completion queue entry for the last WR
 * @chain_wr:	WR to append to the posted chain
 *
 * Post the set of RDMA READ/WRITE operations described by @ctx, as well as
 * any memory registration operations needed. If @chain_wr is non-NULL the
 * WR it points to will be appended to the chain of WRs posted. If @chain_wr
 * is not set @cqe must be set so that the caller gets a completion
 * notification.
 */
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr;

	first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
	return ib_post_send(qp, first_wr, NULL);
}
EXPORT_SYMBOL(rdma_rw_ctx_post);

/**
 * rdma_rw_ctx_destroy - release all resources allocated by rdma_rw_ctx_init
 * @ctx:	context to release
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist that was used for the READ/WRITE
 * @sg_cnt:	number of entries in @sg
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir)
{
	int i;

	switch (ctx->type) {
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++)
			ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
		kfree(ctx->reg);
		break;
	case RDMA_RW_MULTI_WR:
		kfree(ctx->map.wrs);
		kfree(ctx->map.sges);
		break;
	case RDMA_RW_SINGLE_WR:
		break;
	default:
		BUG();
		break;
	}

	rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);

/**
 * rdma_rw_ctx_destroy_signature - release all resources allocated by
 *	rdma_rw_ctx_signature_init
 * @ctx:	context to release
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist that was used for the READ/WRITE
 * @sg_cnt:	number of entries in @sg
 * @prot_sg:	scatterlist that was used for the READ/WRITE of the PI
 * @prot_sg_cnt: number of entries in @prot_sg
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		enum dma_data_direction dir)
{
	if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR))
		return;

	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
	kfree(ctx->reg);

	if (prot_sg_cnt)
		rdma_rw_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
	rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);

/**
 * rdma_rw_mr_factor - return number of MRs required for a payload
 * @device:	device handling the connection
 * @port_num:	port num to which the connection is bound
 * @maxpages:	maximum payload pages per rdma_rw_ctx
 *
 * Returns the number of MRs the device requires to move @maxpages pages of
 * payload. The returned value is used during transport creation to compute
 * max_rdma_ctxs and the size of the transport's Send and Send Completion
 * Queues.
 */
unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num,
		unsigned int maxpages)
{
	unsigned int mr_pages;

	if (rdma_rw_can_use_mr(device, port_num))
		mr_pages = rdma_rw_fr_page_list_len(device, false);
	else
		mr_pages = device->attrs.max_sge_rd;
	return DIV_ROUND_UP(maxpages, mr_pages);
}
EXPORT_SYMBOL(rdma_rw_mr_factor);
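
/*
 * Sketch: sizing a transport's context count at setup time; queue_depth and
 * max_pages are illustrative caller-provided values.
 */
static u32 example_rdma_ctx_count(struct ib_device *device, u8 port_num,
		u32 queue_depth, u32 max_pages)
{
	/* One in-flight rdma_rw_ctx per request, each split across N MRs. */
	return queue_depth * rdma_rw_mr_factor(device, port_num, max_pages);
}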

void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
{
	u32 factor;

	WARN_ON_ONCE(attr->port_num == 0);

	/*
	 * Each context needs at least one RDMA READ or WRITE WR.
	 *
	 * For some hardware we might need more, eventually we should ask the
	 * HCA driver for a multiplier here.
	 */
	factor = 1;

	/*
	 * If the device needs MRs to perform RDMA READ or WRITE operations,
	 * we'll need two additional WRs per context for the registration and
	 * the invalidation.
	 */
	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN ||
	    rdma_rw_can_use_mr(dev, attr->port_num))
		factor += 2;	/* inv + reg */

	attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;

	/*
	 * But maybe we were just too high in the sky and the device doesn't
	 * even support all we need, and we'll have to live with what we get..
	 */
	attr->cap.max_send_wr =
		min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
}
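
/*
 * ULPs do not call this directly: setting cap.max_rdma_ctxs (and port_num)
 * in the QP init attributes makes ib_create_qp() reserve the send-queue
 * space above and set up the MR pools below, e.g.:
 *
 *	attr.cap.max_rdma_ctxs = ctxs;
 *	attr.port_num = port_num;
 *	qp = ib_create_qp(pd, &attr);
 */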

int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
{
	struct ib_device *dev = qp->pd->device;
	u32 nr_mrs = 0, nr_sig_mrs = 0, max_num_sg = 0;
	int ret = 0;

	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) {
		nr_sig_mrs = attr->cap.max_rdma_ctxs;
		nr_mrs = attr->cap.max_rdma_ctxs;
		max_num_sg = rdma_rw_fr_page_list_len(dev, true);
	} else if (rdma_rw_can_use_mr(dev, attr->port_num)) {
		nr_mrs = attr->cap.max_rdma_ctxs;
		max_num_sg = rdma_rw_fr_page_list_len(dev, false);
	}

	if (nr_mrs) {
		ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
				IB_MR_TYPE_MEM_REG,
				max_num_sg, 0);
		if (ret) {
			pr_err("%s: failed to allocate %d MRs\n",
				__func__, nr_mrs);
			return ret;
		}
	}

	if (nr_sig_mrs) {
		ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
				IB_MR_TYPE_INTEGRITY, max_num_sg, max_num_sg);
		if (ret) {
			pr_err("%s: failed to allocate %d SIG MRs\n",
				__func__, nr_sig_mrs);
			goto out_free_rdma_mrs;
		}
	}

	return 0;

out_free_rdma_mrs:
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
	return ret;
}

void rdma_rw_cleanup_mrs(struct ib_qp *qp)
{
	ib_mr_pool_destroy(qp, &qp->sig_mrs);
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
}