// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap_sync).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mrs immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * four states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * FLUSHED_FR:	The MR was being registered when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * FLUSHED_LI:	The MR was being invalidated when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mrs list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */

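/* A rough sketch of the MR state transitions described above, expressed
 * with the fr_state values used by the code below (informational summary
 * only):
 *
 *   FRWR_IS_INVALID  --frwr_op_map posts FAST_REG-->          FRWR_IS_VALID
 *   FRWR_IS_VALID    --frwr_op_unmap_sync posts LOCAL_INV-->  FRWR_IS_INVALID
 *   FAST_REG flushed by QP error                         -->  FRWR_FLUSHED_FR
 *   LOCAL_INV flushed by QP error                        -->  FRWR_FLUSHED_LI
 *
 * FLUSHED_* and stale VALID MRs are recycled (frwr_mr_recycle_worker):
 * released with ib_dereg_mr and replaced on demand.
 */
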
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

bool
frwr_is_supported(struct rpcrdma_ia *ia)
{
        struct ib_device_attr *attrs = &ia->ri_device->attrs;

        if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
                goto out_not_supported;
        if (attrs->max_fast_reg_page_list_len == 0)
                goto out_not_supported;
        return true;

out_not_supported:
        pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
                ia->ri_device->name);
        return false;
}

static int
frwr_op_release_mr(struct rpcrdma_mr *mr)
{
        int rc;

        rc = ib_dereg_mr(mr->frwr.fr_mr);
        if (rc)
                pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
                       mr, rc);
        kfree(mr->mr_sg);
        kfree(mr);
        return rc;
}

/* MRs are dynamically allocated, so simply clean up and release the MR.
 * A replacement MR will subsequently be allocated on demand.
 */
static void
frwr_mr_recycle_worker(struct work_struct *work)
{
        struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
        struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

        trace_xprtrdma_mr_recycle(mr);

        if (mr->mr_dir != DMA_NONE) {
                trace_xprtrdma_mr_unmap(mr);
                ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
                                mr->mr_sg, mr->mr_nents, mr->mr_dir);
                mr->mr_dir = DMA_NONE;
        }

        spin_lock(&r_xprt->rx_buf.rb_mrlock);
        list_del(&mr->mr_all);
        r_xprt->rx_stats.mrs_recycled++;
        spin_unlock(&r_xprt->rx_buf.rb_mrlock);
        frwr_op_release_mr(mr);
}

static int
frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
        unsigned int depth = ia->ri_max_frwr_depth;
        struct rpcrdma_frwr *frwr = &mr->frwr;
        int rc;

        frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
        if (IS_ERR(frwr->fr_mr))
                goto out_mr_err;

        mr->mr_sg = kcalloc(depth, sizeof(*mr->mr_sg), GFP_KERNEL);
        if (!mr->mr_sg)
                goto out_list_err;

        frwr->fr_state = FRWR_IS_INVALID;
        mr->mr_dir = DMA_NONE;
        INIT_LIST_HEAD(&mr->mr_list);
        INIT_WORK(&mr->mr_recycle, frwr_mr_recycle_worker);
        sg_init_table(mr->mr_sg, depth);
        init_completion(&frwr->fr_linv_done);
        return 0;

out_mr_err:
        rc = PTR_ERR(frwr->fr_mr);
        dprintk("RPC:       %s: ib_alloc_mr status %i\n",
                __func__, rc);
        return rc;

out_list_err:
        rc = -ENOMEM;
        dprintk("RPC:       %s: sg allocation failure\n",
                __func__);
        ib_dereg_mr(frwr->fr_mr);
        return rc;
}

/* On success, sets:
 *	ep->rep_attr.cap.max_send_wr
 *	ep->rep_attr.cap.max_recv_wr
 *	cdata->max_requests
 *	ia->ri_max_segs
 *
 * And these FRWR-related fields:
 *	ia->ri_max_frwr_depth
 *	ia->ri_mrtype
 */
static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
             struct rpcrdma_create_data_internal *cdata)
{
        struct ib_device_attr *attrs = &ia->ri_device->attrs;
        int max_qp_wr, depth, delta;

        ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
        if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
                ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

        /* Quirk: Some devices advertise a large max_fast_reg_page_list_len
         * capability, but perform optimally when the MRs are not larger
         * than a page.
         */
        if (attrs->max_sge_rd > 1)
                ia->ri_max_frwr_depth = attrs->max_sge_rd;
        else
                ia->ri_max_frwr_depth = attrs->max_fast_reg_page_list_len;
        if (ia->ri_max_frwr_depth > RPCRDMA_MAX_DATA_SEGS)
                ia->ri_max_frwr_depth = RPCRDMA_MAX_DATA_SEGS;
        dprintk("RPC:       %s: max FR page list depth = %u\n",
                __func__, ia->ri_max_frwr_depth);

        /* Add room for frwr register and invalidate WRs.
         * 1. FRWR reg WR for head
         * 2. FRWR invalidate WR for head
         * 3. N FRWR reg WRs for pagelist
         * 4. N FRWR invalidate WRs for pagelist
         * 5. FRWR reg WR for tail
         * 6. FRWR invalidate WR for tail
         * 7. The RDMA_SEND WR
         */
        depth = 7;

        /* Calculate N if the device max FRWR depth is smaller than
         * RPCRDMA_MAX_DATA_SEGS.
         */
        if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
                delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
                do {
                        depth += 2; /* FRWR reg + invalidate */
                        delta -= ia->ri_max_frwr_depth;
                } while (delta > 0);
        }

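        /* Worked example (illustrative values only): if RPCRDMA_MAX_DATA_SEGS
         * were 64 and the device reported a max FRWR depth of 30, then
         * delta = 34 and the loop above runs twice (34 -> 4 -> -26), so
         * depth = 7 + 2 + 2 = 11 WRs per RPC.
         */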
        max_qp_wr = ia->ri_device->attrs.max_qp_wr;
        max_qp_wr -= RPCRDMA_BACKWARD_WRS;
        max_qp_wr -= 1;
        if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
                return -ENOMEM;
        if (cdata->max_requests > max_qp_wr)
                cdata->max_requests = max_qp_wr;
        ep->rep_attr.cap.max_send_wr = cdata->max_requests * depth;
        if (ep->rep_attr.cap.max_send_wr > max_qp_wr) {
                cdata->max_requests = max_qp_wr / depth;
                if (!cdata->max_requests)
                        return -EINVAL;
                ep->rep_attr.cap.max_send_wr = cdata->max_requests *
                                               depth;
        }
        ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
        ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
        ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
        ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
        ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

        ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
                                ia->ri_max_frwr_depth);
        /* Reply chunks require segments for head and tail buffers */
        ia->ri_max_segs += 2;
        if (ia->ri_max_segs > RPCRDMA_MAX_HDR_SEGS)
                ia->ri_max_segs = RPCRDMA_MAX_HDR_SEGS;
        return 0;
}

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;

        return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                     (ia->ri_max_segs - 2) * ia->ri_max_frwr_depth);
}

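/* Illustrative only, continuing the hypothetical values used earlier
 * (RPCRDMA_MAX_DATA_SEGS = 64, ri_max_frwr_depth = 30): frwr_op_open
 * computes ri_max_segs = max(1, 64 / 30) + 2 = 4, so this function
 * returns min(64, (4 - 2) * 30) = 60 pages per chunk.
 */
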
static void
__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
{
        if (wc->status != IB_WC_WR_FLUSH_ERR)
                pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
                       wr, ib_wc_status_msg(wc->status),
                       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_frwr *frwr =
                        container_of(cqe, struct rpcrdma_frwr, fr_cqe);

        /* WARNING: Only wr_cqe and status are reliable at this point */
        if (wc->status != IB_WC_SUCCESS) {
                frwr->fr_state = FRWR_FLUSHED_FR;
                __frwr_sendcompletion_flush(wc, "fastreg");
        }
        trace_xprtrdma_wc_fastreg(wc, frwr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a flushed LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
                                                 fr_cqe);

        /* WARNING: Only wr_cqe and status are reliable at this point */
        if (wc->status != IB_WC_SUCCESS) {
                frwr->fr_state = FRWR_FLUSHED_LI;
                __frwr_sendcompletion_flush(wc, "localinv");
        }
        trace_xprtrdma_wc_li(wc, frwr);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a signaled LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
                                                 fr_cqe);

        /* WARNING: Only wr_cqe and status are reliable at this point */
        if (wc->status != IB_WC_SUCCESS) {
                frwr->fr_state = FRWR_FLUSHED_LI;
                __frwr_sendcompletion_flush(wc, "localinv");
        }
        complete(&frwr->fr_linv_done);
        trace_xprtrdma_wc_li_wake(wc, frwr);
}

/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static struct rpcrdma_mr_seg *
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
            int nsegs, bool writing, struct rpcrdma_mr **out)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
        struct rpcrdma_frwr *frwr;
        struct rpcrdma_mr *mr;
        struct ib_mr *ibmr;
        struct ib_reg_wr *reg_wr;
        int i, n;
        u8 key;

        mr = NULL;
        do {
                if (mr)
                        rpcrdma_mr_recycle(mr);
                mr = rpcrdma_mr_get(r_xprt);
                if (!mr)
                        return ERR_PTR(-EAGAIN);
        } while (mr->frwr.fr_state != FRWR_IS_INVALID);
        frwr = &mr->frwr;
        frwr->fr_state = FRWR_IS_VALID;

        if (nsegs > ia->ri_max_frwr_depth)
                nsegs = ia->ri_max_frwr_depth;
        for (i = 0; i < nsegs;) {
                if (seg->mr_page)
                        sg_set_page(&mr->mr_sg[i],
                                    seg->mr_page,
                                    seg->mr_len,
                                    offset_in_page(seg->mr_offset));
                else
                        sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
                                   seg->mr_len);

                ++seg;
                ++i;
                if (holes_ok)
                        continue;
                if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
                    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
                        break;
        }
        mr->mr_dir = rpcrdma_data_dir(writing);

        mr->mr_nents = ib_dma_map_sg(ia->ri_device, mr->mr_sg, i, mr->mr_dir);
        if (!mr->mr_nents)
                goto out_dmamap_err;
        trace_xprtrdma_mr_map(mr);

        ibmr = frwr->fr_mr;
        n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
        if (unlikely(n != mr->mr_nents))
                goto out_mapmr_err;

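        /* The low-order byte of the rkey is bumped for each registration so
         * the MR presents a fresh key every time; presumably this keeps a
         * stale rkey from an earlier registration of this MR from being
         * honored by the peer.
         */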
        key = (u8)(ibmr->rkey & 0x000000FF);
        ib_update_fast_reg_key(ibmr, ++key);

        reg_wr = &frwr->fr_regwr;
        reg_wr->mr = ibmr;
        reg_wr->key = ibmr->rkey;
        reg_wr->access = writing ?
                         IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
                         IB_ACCESS_REMOTE_READ;

        mr->mr_handle = ibmr->rkey;
        mr->mr_length = ibmr->length;
        mr->mr_offset = ibmr->iova;

        *out = mr;
        return seg;

out_dmamap_err:
        pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
               mr->mr_sg, i);
        frwr->fr_state = FRWR_IS_INVALID;
        rpcrdma_mr_put(mr);
        return ERR_PTR(-EIO);

out_mapmr_err:
        pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
               frwr->fr_mr, n, mr->mr_nents);
        rpcrdma_mr_recycle(mr);
        return ERR_PTR(-EIO);
}

/* Post Send WR containing the RPC Call message.
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 */
static int
frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
        struct ib_send_wr *post_wr;
        struct rpcrdma_mr *mr;

        post_wr = &req->rl_sendctx->sc_wr;
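        /* Illustrative sketch: each iteration below prepends one IB_WR_REG_MR
         * WR, so the chain handed to ib_post_send() ends up as
         *
         *   REG_MR(last MR on rl_registered) -> ... -> REG_MR(first MR) -> Send
         *
         * and send queue ordering executes the registrations before the Send.
         */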
        list_for_each_entry(mr, &req->rl_registered, mr_list) {
                struct rpcrdma_frwr *frwr;

                frwr = &mr->frwr;

                frwr->fr_cqe.done = frwr_wc_fastreg;
                frwr->fr_regwr.wr.next = post_wr;
                frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
                frwr->fr_regwr.wr.num_sge = 0;
                frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
                frwr->fr_regwr.wr.send_flags = 0;

                post_wr = &frwr->fr_regwr.wr;
        }

        /* If ib_post_send fails, the next ->send_request for
         * @req will queue these MRs for recovery.
         */
        return ib_post_send(ia->ri_id->qp, post_wr, NULL);
}

/* Handle a remotely invalidated MR on the @mrs list
 */
static void
frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
        struct rpcrdma_mr *mr;

        list_for_each_entry(mr, mrs, mr_list)
                if (mr->mr_handle == rep->rr_inv_rkey) {
                        list_del_init(&mr->mr_list);
                        trace_xprtrdma_mr_remoteinv(mr);
                        mr->frwr.fr_state = FRWR_IS_INVALID;
                        rpcrdma_mr_unmap_and_put(mr);
                        break;	/* only one invalidated MR per RPC */
                }
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mrs is not empty before the call. This
 * function empties the list.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
        struct ib_send_wr *first, **prev, *last;
        const struct ib_send_wr *bad_wr;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_frwr *frwr;
        struct rpcrdma_mr *mr;
        int rc;

        /* ORDER: Invalidate all of the MRs first
         *
         * Chain the LOCAL_INV Work Requests and post them with
         * a single ib_post_send() call.
         */
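        /* Sketch (informational): for MRs mr1..mrN on @mrs, the loop below
         * builds the chain
         *
         *   first -> LOCAL_INV(mr1) -> LOCAL_INV(mr2) -> ... -> LOCAL_INV(mrN)
         *
         * and only the final WR is marked IB_SEND_SIGNALED further down.
         */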
        frwr = NULL;
        prev = &first;
        list_for_each_entry(mr, mrs, mr_list) {
                mr->frwr.fr_state = FRWR_IS_INVALID;

                frwr = &mr->frwr;
                trace_xprtrdma_mr_localinv(mr);

                frwr->fr_cqe.done = frwr_wc_localinv;
                last = &frwr->fr_invwr;
                memset(last, 0, sizeof(*last));
                last->wr_cqe = &frwr->fr_cqe;
                last->opcode = IB_WR_LOCAL_INV;
                last->ex.invalidate_rkey = mr->mr_handle;

                *prev = last;
                prev = &last->next;
        }
        if (!frwr)
                goto unmap;

        /* Strong send queue ordering guarantees that when the
         * last WR in the chain completes, all WRs in the chain
         * are complete.
         */
        last->send_flags = IB_SEND_SIGNALED;
        frwr->fr_cqe.done = frwr_wc_localinv_wake;
        reinit_completion(&frwr->fr_linv_done);

        /* Transport disconnect drains the receive CQ before it
         * replaces the QP. The RPC reply handler won't call us
         * unless ri_id->qp is a valid pointer.
         */
        r_xprt->rx_stats.local_inv_needed++;
        bad_wr = NULL;
        rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
        if (bad_wr != first)
                wait_for_completion(&frwr->fr_linv_done);
        if (rc)
                goto out_release;

        /* ORDER: Now DMA unmap all of the MRs, and return
         * them to the free MR list.
         */
unmap:
        while (!list_empty(mrs)) {
                mr = rpcrdma_mr_pop(mrs);
                rpcrdma_mr_unmap_and_put(mr);
        }
        return;

out_release:
        pr_err("rpcrdma: FRWR invalidate ib_post_send returned %i\n", rc);

        /* Unmap and release the MRs in the LOCAL_INV WRs that did not
         * get posted.
         */
        while (bad_wr) {
                frwr = container_of(bad_wr, struct rpcrdma_frwr,
                                    fr_invwr);
                mr = container_of(frwr, struct rpcrdma_mr, frwr);
                bad_wr = bad_wr->next;

                list_del_init(&mr->mr_list);
                rpcrdma_mr_recycle(mr);
        }
}

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
        .ro_map				= frwr_op_map,
        .ro_send			= frwr_op_send,
        .ro_reminv			= frwr_op_reminv,
        .ro_unmap_sync			= frwr_op_unmap_sync,
        .ro_open			= frwr_op_open,
        .ro_maxpages			= frwr_op_maxpages,
        .ro_init_mr			= frwr_op_init_mr,
        .ro_release_mr			= frwr_op_release_mr,
        .ro_displayname			= "frwr",
        .ro_send_w_inv_ok		= RPCRDMA_CMP_F_SND_W_INV_OK,
};