xprtrdma: Refactor MR recovery work queues
[linux-2.6-block.git] / net/sunrpc/xprtrdma/frwr_ops.c
/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR). Also sometimes referred to as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * three states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *		(Or, the LOCAL_INV WR has not completed or flushed yet).
 *
 * STALE:	The MR was being registered or unregistered when the QP
 *		entered ERROR state, and the pending WR was flushed.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * When frwr_op_map encounters STALE and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

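/* Allocate the MR and scatterlist for one rpcrdma_mw. "depth" is the
 * maximum number of scatterlist segments the MR can register with a
 * single FAST_REG Work Request.
 */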
static int
__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, unsigned int depth)
{
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	f->fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
	if (IS_ERR(f->fr_mr))
		goto out_mr_err;

	r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);
	if (!r->mw_sg)
		goto out_list_err;

	sg_init_table(r->mw_sg, depth);
	init_completion(&f->fr_linv_done);
	return 0;

out_mr_err:
	rc = PTR_ERR(f->fr_mr);
	dprintk("RPC: %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC: %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(f->fr_mr);
	return rc;
}

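/* Release the MR and scatterlist owned by one rpcrdma_mw. */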
static void
__frwr_release(struct rpcrdma_mw *r)
{
	int rc;

	rc = ib_dereg_mr(r->frmr.fr_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
		       r, rc);
	kfree(r->mw_sg);
}

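/* Recover a broken MR by deregistering it and replacing it with a
 * freshly allocated MR. On success the MR is left in the INVALID
 * state, ready for re-use.
 */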
static int
__frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	rc = ib_dereg_mr(f->fr_mr);
	if (rc) {
		pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
			rc, r);
		return rc;
	}

	f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG,
			       ia->ri_max_frmr_depth);
	if (IS_ERR(f->fr_mr)) {
		pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
			PTR_ERR(f->fr_mr), r);
		return PTR_ERR(f->fr_mr);
	}

	dprintk("RPC: %s: recovered FRMR %p\n", __func__, r);
	f->fr_state = FRMR_IS_INVALID;
	return 0;
}

/* Reset of a single FRMR. Generate a fresh rkey by replacing the MR.
 *
 * There's no recovery if this fails. The FRMR is abandoned, but
 * remains in rb_all. It will be cleaned up when the transport is
 * destroyed.
 */
static void
frwr_op_recover_mr(struct rpcrdma_mw *mw)
{
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int rc;

	rc = __frwr_reset_mr(ia, mw);
	ib_dma_unmap_sg(ia->ri_device, mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (rc) {
		pr_err("rpcrdma: FRMR reset status %d, %p orphaned\n",
		       rc, mw);
		r_xprt->rx_stats.mrs_orphaned++;
		return;
	}

	rpcrdma_put_mw(r_xprt, mw);
	r_xprt->rx_stats.mrs_recovered++;
}

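/* Compute this device's FRMR depth, then size the Send Queue to hold
 * the register and invalidate WRs for every in-flight RPC, reducing
 * the RPC slot count if the device cannot post that many send WRs.
 */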
static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	int depth, delta;

	ia->ri_max_frmr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      ia->ri_device->attrs.max_fast_reg_page_list_len);
	dprintk("RPC: %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frmr_depth);

	/* Add room for frmr register and invalidate WRs.
	 * 1. FRMR reg WR for head
	 * 2. FRMR invalidate WR for head
	 * 3. N FRMR reg WRs for pagelist
	 * 4. N FRMR invalidate WRs for pagelist
	 * 5. FRMR reg WR for tail
	 * 6. FRMR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRMR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
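	/* Example (assumed values): if RPCRDMA_MAX_DATA_SEGS were 64
	 * and the device capped the FRMR depth at 16, the pagelist
	 * would need 64 / 16 = 4 MRs. The loop below would then run
	 * three times, adding a reg + invalidate WR pair for each MR
	 * beyond the pair counted above: depth = 7 + 3 * 2 = 13.
	 */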
	if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
		do {
			depth += 2; /* FRMR reg + invalidate */
			delta -= ia->ri_max_frmr_depth;
		} while (delta > 0);
	}

	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > ia->ri_device->attrs.max_qp_wr) {
		cdata->max_requests = ia->ri_device->attrs.max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
						      RPCRDMA_MAX_DATA_SEGS /
						      ia->ri_max_frmr_depth));
	return 0;
}

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);
}

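/* Mark an MR STALE when its WR flushes or fails, and log any
 * completion status other than a transport-disconnect flush.
 */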
static void
__frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_frmr *frmr,
			    const char *wr)
{
	frmr->fr_state = FRMR_IS_STALE;
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
		       wr, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for each polled FastReg WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		__frwr_sendcompletion_flush(wc, frmr, "fastreg");
	}
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for each polled LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		__frwr_sendcompletion_flush(wc, frmr, "localinv");
	}
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for each polled LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	cqe = wc->wr_cqe;
	frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
	if (wc->status != IB_WC_SUCCESS)
		__frwr_sendcompletion_flush(wc, frmr, "localinv");
	complete_all(&frmr->fr_linv_done);
}

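/* Allocate this transport's pool of rpcrdma_mws: enough MRs to
 * register the head, pagelist, and tail chunks of every RPC slot.
 */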
static int
frwr_op_init(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
	int i;

	spin_lock_init(&buf->rb_mwlock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);

	i = max_t(int, RPCRDMA_MAX_DATA_SEGS / depth, 1);
	i += 2;				/* head + tail */
	i *= buf->rb_max_requests;	/* one set for each RPC slot */
	dprintk("RPC: %s: initializing %d FRMRs\n", __func__, i);

	while (i--) {
		struct rpcrdma_mw *r;
		int rc;

		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			return -ENOMEM;

		rc = __frwr_init(r, pd, depth);
		if (rc) {
			kfree(r);
			return rc;
		}

		r->mw_xprt = r_xprt;
		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
	}

	return 0;
}

/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_mw *mw;
	struct rpcrdma_frmr *frmr;
	struct ib_mr *mr;
	struct ib_reg_wr *reg_wr;
	struct ib_send_wr *bad_wr;
	int rc, i, n, dma_nents;
	u8 key;

	mw = seg1->rl_mw;
	seg1->rl_mw = NULL;
	do {
		if (mw)
			rpcrdma_defer_mr_recovery(mw);
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return -ENOMEM;
	} while (mw->frmr.fr_state != FRMR_IS_INVALID);
	frmr = &mw->frmr;
	frmr->fr_state = FRMR_IS_VALID;
	mr = frmr->fr_mr;
	reg_wr = &frmr->fr_regwr;

	if (nsegs > ia->ri_max_frmr_depth)
		nsegs = ia->ri_max_frmr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mw->mw_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;

		/* Check for holes: stop collecting segments when the
		 * just-added segment ends off a page boundary, or the
		 * next one does not start on one.
		 */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mw->mw_nents = i;
	mw->mw_dir = rpcrdma_data_dir(writing);

	dma_nents = ib_dma_map_sg(ia->ri_device,
				  mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (!dma_nents)
		goto out_dmamap_err;

	n = ib_map_mr_sg(mr, mw->mw_sg, mw->mw_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mw->mw_nents))
		goto out_mapmr_err;

	dprintk("RPC: %s: Using frmr %p to map %u segments (%u bytes)\n",
		__func__, mw, mw->mw_nents, mr->length);

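	/* Rotate the low-order byte of the rkey so that each
	 * registration of this MR is distinguishable from the
	 * previous one; RDMA access attempted with a stale rkey
	 * then fails.
	 */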
	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);

	reg_wr->wr.next = NULL;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	frmr->fr_cqe.done = frwr_wc_fastreg;
	reg_wr->wr.wr_cqe = &frmr->fr_cqe;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = 0;
	reg_wr->mr = mr;
	reg_wr->key = mr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	DECR_CQCOUNT(&r_xprt->rx_ep);
	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
	if (rc)
		goto out_senderr;

	seg1->rl_mw = mw;
	seg1->mr_rkey = mr->rkey;
	seg1->mr_base = mr->iova;
	seg1->mr_nsegs = mw->mw_nents;
	seg1->mr_len = mr->length;

	return mw->mw_nents;

out_dmamap_err:
	pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
	       mw->mw_sg, mw->mw_nents);
	return -ENOMEM;

out_mapmr_err:
	pr_err("rpcrdma: failed to map mr %p (%u/%u)\n",
	       frmr->fr_mr, n, mw->mw_nents);
	rc = n < 0 ? n : -EIO;
	rpcrdma_defer_mr_recovery(mw);
	return rc;

out_senderr:
	rpcrdma_defer_mr_recovery(mw);
	return rc;
}

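/* Build the LOCAL_INV Work Request that fences one MR. The MR is
 * marked INVALID before the WR is posted (see "Normal operation"
 * above).
 */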
static struct ib_send_wr *
__frwr_prepare_linv_wr(struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_mw *mw = seg->rl_mw;
	struct rpcrdma_frmr *f = &mw->frmr;
	struct ib_send_wr *invalidate_wr;

	f->fr_state = FRMR_IS_INVALID;
	invalidate_wr = &f->fr_invwr;

	memset(invalidate_wr, 0, sizeof(*invalidate_wr));
	f->fr_cqe.done = frwr_wc_localinv;
	invalidate_wr->wr_cqe = &f->fr_cqe;
	invalidate_wr->opcode = IB_WR_LOCAL_INV;
	invalidate_wr->ex.invalidate_rkey = f->fr_mr->rkey;

	return invalidate_wr;
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *invalidate_wrs, *pos, *prev, *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mr_seg *seg;
	unsigned int i, nchunks;
	struct rpcrdma_frmr *f;
	struct rpcrdma_mw *mw;
	int rc;

	dprintk("RPC: %s: req %p\n", __func__, req);

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	invalidate_wrs = pos = prev = NULL;
	seg = NULL;
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];

		pos = __frwr_prepare_linv_wr(seg);

		if (!invalidate_wrs)
			invalidate_wrs = pos;
		else
			prev->next = pos;
		prev = pos;

		i += seg->mr_nsegs;
	}
	f = &seg->rl_mw->frmr;

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	f->fr_invwr.send_flags = IB_SEND_SIGNALED;
	f->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&f->fr_linv_done);
	INIT_CQCOUNT(&r_xprt->rx_ep);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr);
	if (rc)
		goto reset_mrs;

	wait_for_completion(&f->fr_linv_done);

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
unmap:
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;
		seg->rl_mw = NULL;

		ib_dma_unmap_sg(ia->ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
		rpcrdma_put_mw(r_xprt, mw);

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
	}

	req->rl_nchunks = 0;
	return;

reset_mrs:
	pr_warn("%s: ib_post_send failed %i\n", __func__, rc);

	/* Find and reset the MRs in the LOCAL_INV WRs that did not
	 * get posted. This is synchronous, and slow.
	 */
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;
		f = &mw->frmr;

		if (mw->frmr.fr_mr->rkey == bad_wr->ex.invalidate_rkey) {
			__frwr_reset_mr(ia, mw);
			bad_wr = bad_wr->next;
		}

		i += seg->mr_nsegs;
	}
	goto unmap;
}

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
static void
frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		   bool sync)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	unsigned int i;

	for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		if (sync)
			frwr_op_recover_mr(mw);
		else
			rpcrdma_defer_mr_recovery(mw);

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
		seg->rl_mw = NULL;
	}
}

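/* Free every MR on the rb_all list when the transport is destroyed. */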
static void
frwr_op_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		__frwr_release(r);
		kfree(r);
	}
}

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map = frwr_op_map,
	.ro_unmap_sync = frwr_op_unmap_sync,
	.ro_unmap_safe = frwr_op_unmap_safe,
	.ro_recover_mr = frwr_op_recover_mr,
	.ro_open = frwr_op_open,
	.ro_maxpages = frwr_op_maxpages,
	.ro_init = frwr_op_init,
	.ro_destroy = frwr_op_destroy,
	.ro_displayname = "frwr",
};