xprtrdma: Rename fields in rpcrdma_fmr
net/sunrpc/xprtrdma/fmr_ops.c

/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Memory Regions (FMR).
 * Referred to sometimes as MTHCAFMR mode.
 *
 * FMR uses synchronous memory registration and deregistration.
 * FMR registration is known to be fast, but FMR deregistration
 * can take tens of usecs to complete.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using the
 * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
 * finished, the Memory Region is unmapped using the ib_unmap_fmr
 * verb (fmr_op_unmap).
 */

/* Transport recovery
 *
 * After a transport reconnect, fmr_op_map re-uses the MR already
 * allocated for the RPC, but generates a fresh rkey then maps the
 * MR again. This process is synchronous.
 */

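/* A minimal sketch (hypothetical; error handling and transport
 * plumbing omitted) of how these verbs pair up:
 *
 *	struct ib_fmr *fmr;
 *	LIST_HEAD(fmr_list);
 *
 *	fmr = ib_alloc_fmr(pd, RPCRDMA_FMR_ACCESS_FLAGS, &fmr_attr);
 *	ib_map_phys_fmr(fmr, physaddrs, npages, iova);
 *	... peer performs RDMA READ/WRITE using fmr->rkey ...
 *	list_add(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 *	ib_dealloc_fmr(fmr);
 *
 * "physaddrs", "npages", and "iova" are illustrative names; the
 * real calls appear in __fmr_init, fmr_op_map, and __fmr_unmap
 * below.
 */
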
#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES	(64)
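/* Assuming 4 KiB pages, one FMR can thus cover up to
 * 64 * 4 KiB = 256 KiB of payload.
 */
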
/* Access mode of externally registered pages */
enum {
	RPCRDMA_FMR_ACCESS_FLAGS	= IB_ACCESS_REMOTE_WRITE |
					  IB_ACCESS_REMOTE_READ,
};

static struct workqueue_struct *fmr_recovery_wq;

#define FMR_RECOVERY_WQ_FLAGS		(WQ_UNBOUND)
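/* __fmr_recovery_worker() calls ib_unmap_fmr(), which can sleep,
 * so recovery must run in process context. WQ_UNBOUND merely lets
 * the work run on any CPU rather than the queueing CPU.
 */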

int
fmr_alloc_recovery_wq(void)
{
	fmr_recovery_wq = alloc_workqueue("fmr_recovery",
					  FMR_RECOVERY_WQ_FLAGS, 0);
	return !fmr_recovery_wq ? -ENOMEM : 0;
}

void
fmr_destroy_recovery_wq(void)
{
	struct workqueue_struct *wq;

	if (!fmr_recovery_wq)
		return;

	wq = fmr_recovery_wq;
	fmr_recovery_wq = NULL;
	destroy_workqueue(wq);
}

static int
__fmr_init(struct rpcrdma_mw *mw, struct ib_pd *pd)
{
	static struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_FMR_SGES,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};

	mw->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
				       sizeof(u64), GFP_KERNEL);
	if (!mw->fmr.fm_physaddrs)
		goto out_free;

	mw->mw_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
			    sizeof(*mw->mw_sg), GFP_KERNEL);
	if (!mw->mw_sg)
		goto out_free;

	sg_init_table(mw->mw_sg, RPCRDMA_MAX_FMR_SGES);

	mw->fmr.fm_mr = ib_alloc_fmr(pd, RPCRDMA_FMR_ACCESS_FLAGS,
				     &fmr_attr);
	if (IS_ERR(mw->fmr.fm_mr))
		goto out_fmr_err;

	return 0;

out_fmr_err:
	dprintk("RPC: %s: ib_alloc_fmr returned %ld\n", __func__,
		PTR_ERR(mw->fmr.fm_mr));

out_free:
	kfree(mw->mw_sg);
	kfree(mw->fmr.fm_physaddrs);
	return -ENOMEM;
}

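/* ib_unmap_fmr() takes a list head so that a batch of FMRs can be
 * invalidated in one call. Here a single MR is placed on a
 * temporary list, unmapped, then removed from the list again.
 */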
static int
__fmr_unmap(struct rpcrdma_mw *mw)
{
	LIST_HEAD(l);
	int rc;

	list_add(&mw->fmr.fm_mr->list, &l);
	rc = ib_unmap_fmr(&l);
	list_del_init(&mw->fmr.fm_mr->list);
	return rc;
}

static void
__fmr_dma_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
	struct ib_device *device = r_xprt->rx_ia.ri_device;
	int nsegs = seg->mr_nsegs;

	while (nsegs--)
		rpcrdma_unmap_one(device, seg++);
}

static void
__fmr_release(struct rpcrdma_mw *r)
{
	int rc;

	kfree(r->fmr.fm_physaddrs);
	kfree(r->mw_sg);

	rc = ib_dealloc_fmr(r->fmr.fm_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
		       r, rc);
}

/* Deferred reset of a single FMR. Generate a fresh rkey by
 * replacing the MR. There's no recovery if this fails.
 */
static void
__fmr_recovery_worker(struct work_struct *work)
{
	struct rpcrdma_mw *mw = container_of(work, struct rpcrdma_mw,
					     mw_work);
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;

	__fmr_unmap(mw);
	rpcrdma_put_mw(r_xprt, mw);
}

/* A broken MR was discovered in a context that can't sleep.
 * Defer recovery to the recovery worker.
 */
static void
__fmr_queue_recovery(struct rpcrdma_mw *mw)
{
	INIT_WORK(&mw->mw_work, __fmr_recovery_worker);
	queue_work(fmr_recovery_wq, &mw->mw_work);
}

static int
fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	    struct rpcrdma_create_data_internal *cdata)
{
	rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
						      RPCRDMA_MAX_DATA_SEGS /
						      RPCRDMA_MAX_FMR_SGES));
	return 0;
}

/* FMR mode conveys up to 64 pages of payload per chunk segment.
 */
static size_t
fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
}

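/* Illustrative arithmetic: if RPCRDMA_MAX_HDR_SEGS were 8, the
 * product above would be 8 * 64 = 512 pages, which min_t() then
 * clamps to RPCRDMA_MAX_DATA_SEGS.
 */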
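
/* Illustrative sizing for fmr_op_init below: if RPCRDMA_MAX_DATA_SEGS
 * and RPCRDMA_MAX_FMR_SGES were both 64, each RPC slot would get
 * 1 + 2 (head + tail) = 3 MWs, or 3 * rb_max_requests in total.
 */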
static int
fmr_op_init(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
	struct rpcrdma_mw *r;
	int i, rc;

	spin_lock_init(&buf->rb_mwlock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);

	i = max_t(int, RPCRDMA_MAX_DATA_SEGS / RPCRDMA_MAX_FMR_SGES, 1);
	i += 2;				/* head + tail */
	i *= buf->rb_max_requests;	/* one set for each RPC slot */
	dprintk("RPC: %s: initializing %d FMRs\n", __func__, i);

	while (i--) {
		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			return -ENOMEM;

		rc = __fmr_init(r, pd);
		if (rc) {
			kfree(r);
			return rc;
		}

		r->mw_xprt = r_xprt;
		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
	}
	return 0;
}

/* Use the ib_map_phys_fmr() verb to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	   int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct ib_device *device = ia->ri_device;
	enum dma_data_direction direction = rpcrdma_data_dir(writing);
	struct rpcrdma_mr_seg *seg1 = seg;
	int len, pageoff, i, rc;
	struct rpcrdma_mw *mw;

	mw = seg1->rl_mw;
	seg1->rl_mw = NULL;
	if (!mw) {
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return -ENOMEM;
	} else {
		/* this is a retransmit; generate a fresh rkey */
		rc = __fmr_unmap(mw);
		if (rc)
			return rc;
	}

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (nsegs > RPCRDMA_MAX_FMR_SGES)
		nsegs = RPCRDMA_MAX_FMR_SGES;
	for (i = 0; i < nsegs;) {
		rpcrdma_map_one(device, seg, direction);
		mw->fmr.fm_physaddrs[i] = seg->mr_dma;
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes: an FMR maps a single virtually
		 * contiguous region, so coalescing must stop at the
		 * first segment that does not end, or is not followed
		 * by one that begins, on a page boundary.
		 */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}

	rc = ib_map_phys_fmr(mw->fmr.fm_mr, mw->fmr.fm_physaddrs,
			     i, seg1->mr_dma);
	if (rc)
		goto out_maperr;

	seg1->rl_mw = mw;
	seg1->mr_rkey = mw->fmr.fm_mr->rkey;
	seg1->mr_base = seg1->mr_dma + pageoff;
	seg1->mr_nsegs = i;
	seg1->mr_len = len;
	return i;

out_maperr:
	dprintk("RPC: %s: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
		__func__, len, (unsigned long long)seg1->mr_dma,
		pageoff, i, rc);
	while (i--)
		rpcrdma_unmap_one(device, --seg);
	return rc;
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 */
static void
fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct rpcrdma_mr_seg *seg;
	unsigned int i, nchunks;
	struct rpcrdma_mw *mw;
	LIST_HEAD(unmap_list);
	int rc;

	dprintk("RPC: %s: req %p\n", __func__, req);

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * ib_unmap_fmr() is slow, so use a single call instead
	 * of one call per mapped MR.
	 */
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		list_add_tail(&mw->fmr.fm_mr->list, &unmap_list);

		i += seg->mr_nsegs;
	}
	rc = ib_unmap_fmr(&unmap_list);
	if (rc)
		pr_warn("%s: ib_unmap_fmr failed (%i)\n", __func__, rc);

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		list_del_init(&mw->fmr.fm_mr->list);
		__fmr_dma_unmap(r_xprt, seg);
		rpcrdma_put_mw(r_xprt, mw);

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
		seg->rl_mw = NULL;
	}

	req->rl_nchunks = 0;
}

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 *
 * In the asynchronous case, DMA unmapping occurs first here
 * because the rpcrdma_mr_seg is released immediately after this
 * call. Its contents won't be available to __fmr_dma_unmap later.
 * FIXME.
 */
static void
fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		  bool sync)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	unsigned int i;

	for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		if (sync) {
			/* ORDER */
			__fmr_unmap(mw);
			__fmr_dma_unmap(r_xprt, seg);
			rpcrdma_put_mw(r_xprt, mw);
		} else {
			__fmr_dma_unmap(r_xprt, seg);
			__fmr_queue_recovery(mw);
		}

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
		seg->rl_mw = NULL;
	}
}

static void
fmr_op_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		__fmr_release(r);
		kfree(r);
	}
}

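/* This vector is selected (see rpcrdma_ia_open in verbs.c) when the
 * transport's memory registration strategy is RPCRDMA_MTHCAFMR.
 */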
const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
	.ro_map				= fmr_op_map,
	.ro_unmap_sync			= fmr_op_unmap_sync,
	.ro_unmap_safe			= fmr_op_unmap_safe,
	.ro_open			= fmr_op_open,
	.ro_maxpages			= fmr_op_maxpages,
	.ro_init			= fmr_op_init,
	.ro_destroy			= fmr_op_destroy,
	.ro_displayname			= "fmr",
};