// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and invalidation
 * of arbitrarily-sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA Read or Write using a FAST_REG
 * Work Request (frwr_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_unmap_async and frwr_unmap_sync).
 *
 * Typically FAST_REG Work Requests are not signaled, and neither are
 * RDMA Send Work Requests (with the exception of signaling occasionally
 * to prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 */

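/* A rough sketch of that flow, in terms of the functions in this file
 * (marshaling and connection management details are omitted):
 *
 *	frwr_map()	- DMA-map a chunk and build its REG_MR Work Request
 *	frwr_send()	- chain the REG_MR WRs ahead of the Send WR and post
 *			  them all with a single ib_post_send()
 *	frwr_reminv()	- drop MRs that the server invalidated remotely
 *	frwr_unmap_async() / frwr_unmap_sync()
 *			- post LOCAL_INV WRs to fence the remaining MRs
 */
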
/* Transport recovery
 *
 * frwr_map and frwr_unmap_* cannot run at the same time the transport
 * connect worker is running. The connect worker holds the transport
 * send lock, just as ->send_request does. This prevents frwr_map and
 * the connect worker from running concurrently. When a connection is
 * closed, the Receive completion queue is drained before allowing the
 * connect worker to get control. This prevents frwr_unmap and the
 * connect worker from running concurrently.
 *
 * When the underlying transport disconnects, MRs that are in flight
 * are flushed and are likely unusable. Thus all MRs are destroyed.
 * New MRs are created on demand.
 */

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/**
 * frwr_release_mr - Destroy one MR
 * @mr: MR allocated by frwr_init_mr
 */
void frwr_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	rc = ib_dereg_mr(mr->frwr.fr_mr);
	if (rc)
		trace_xprtrdma_frwr_dereg(mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

static void frwr_mr_recycle(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_mr_recycle(mr);

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_id->device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}

	spin_lock(&r_xprt->rx_buf.rb_lock);
	list_del(&mr->mr_all);
	r_xprt->rx_stats.mrs_recycled++;
	spin_unlock(&r_xprt->rx_buf.rb_lock);

	frwr_release_mr(mr);
}

/* frwr_reset - Place MRs back on the free list
 * @req: request to reset
 *
 * Used after a failed marshal. For FRWR, this means the MRs
 * don't have to be fully released and recreated.
 *
 * NB: This is safe only as long as none of @req's MRs are
 * involved with an ongoing asynchronous FAST_REG or LOCAL_INV
 * Work Request.
 */
void frwr_reset(struct rpcrdma_req *req)
{
	struct rpcrdma_mr *mr;

	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
		rpcrdma_mr_put(mr);
}

/**
 * frwr_init_mr - Initialize one MR
 * @ia: interface adapter
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno
 * is returned.
 */
int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	unsigned int depth = ia->ri_max_frwr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;
	int rc;

	frmr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
	if (IS_ERR(frmr))
		goto out_mr_err;

	sg = kcalloc(depth, sizeof(*sg), GFP_NOFS);
	if (!sg)
		goto out_list_err;

	mr->frwr.fr_mr = frmr;
	mr->mr_dir = DMA_NONE;
	INIT_LIST_HEAD(&mr->mr_list);
	init_completion(&mr->frwr.fr_linv_done);

	sg_init_table(sg, depth);
	mr->mr_sg = sg;
	return 0;

out_mr_err:
	rc = PTR_ERR(frmr);
	trace_xprtrdma_frwr_alloc(mr, rc);
	return rc;

out_list_err:
	ib_dereg_mr(frmr);
	return -ENOMEM;
}

/**
 * frwr_query_device - Prepare a transport for use with FRWR
 * @r_xprt: controlling transport instance
 * @device: RDMA device to query
 *
 * On success, sets:
 *	ep->rep_attr
 *	ep->rep_max_requests
 *	ia->ri_max_rdma_segs
 *
 * And these FRWR-related fields:
 *	ia->ri_max_frwr_depth
 *	ia->ri_mrtype
 *
 * Return values:
 *   On success, returns zero.
 *   %-EINVAL - the device does not support FRWR memory registration
 *   %-ENOMEM - the device is not sufficiently capable for NFS/RDMA
 */
int frwr_query_device(struct rpcrdma_xprt *r_xprt,
		      const struct ib_device *device)
{
	const struct ib_device_attr *attrs = &device->attrs;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	int max_qp_wr, depth, delta;
	unsigned int max_sge;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
	    attrs->max_fast_reg_page_list_len == 0) {
		pr_err("rpcrdma: 'frwr' mode is not supported by device %s\n",
		       device->name);
		return -EINVAL;
	}

	max_sge = min_t(unsigned int, attrs->max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_err("rpcrdma: HCA provides only %u send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ep->rep_attr.cap.max_send_sge = max_sge;
	ep->rep_attr.cap.max_recv_sge = 1;

	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

	/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
	 * capability, but perform optimally when the MRs are not larger
	 * than a page.
	 */
	if (attrs->max_sge_rd > RPCRDMA_MAX_HDR_SEGS)
		ia->ri_max_frwr_depth = attrs->max_sge_rd;
	else
		ia->ri_max_frwr_depth = attrs->max_fast_reg_page_list_len;
	if (ia->ri_max_frwr_depth > RPCRDMA_MAX_DATA_SEGS)
		ia->ri_max_frwr_depth = RPCRDMA_MAX_DATA_SEGS;

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ia->ri_max_frwr_depth;
		} while (delta > 0);
	}
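
	/* For example, with a hypothetical device FRWR depth of 8 and an
	 * RPCRDMA_MAX_DATA_SEGS of 32, delta starts at 24 and the loop
	 * above runs three times, growing depth from 7 to 13: one extra
	 * reg/invalidate pair for each additional pagelist chunk.
	 */
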
	max_qp_wr = attrs->max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (ep->rep_max_requests > max_qp_wr)
		ep->rep_max_requests = max_qp_wr;
	ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth;
	if (ep->rep_attr.cap.max_send_wr > max_qp_wr) {
		ep->rep_max_requests = max_qp_wr / depth;
		if (!ep->rep_max_requests)
			return -ENOMEM;
		ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth;
	}
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->rep_attr.cap.max_recv_wr = ep->rep_max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
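	/* Continuing the example above: with a depth of 13 and, say, a
	 * default rep_max_requests of 128, the send queue needs 1664
	 * slots before the backchannel and ib_drain_sq slots are added.
	 * A device whose max_qp_wr is smaller than that forces
	 * rep_max_requests down to max_qp_wr / depth.
	 */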

	ia->ri_max_rdma_segs =
		DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ia->ri_max_frwr_depth);
	/* Reply chunks require segments for head and tail buffers */
	ia->ri_max_rdma_segs += 2;
	if (ia->ri_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
		ia->ri_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;

	/* Ensure the underlying device is capable of conveying the
	 * largest r/wsize NFS will ask for. This guarantees that
	 * failing over from one RDMA device to another will not
	 * break NFS I/O.
	 */
	if ((ia->ri_max_rdma_segs * ia->ri_max_frwr_depth) < RPCRDMA_MAX_SEGS)
		return -ENOMEM;

	return 0;
}

/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of RPC using the registered memory
 * @mr: MR to fill in
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, @mr is filled in.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr *mr)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct ib_reg_wr *reg_wr;
	int i, n, dma_nents;
	struct ib_mr *ibmr;
	u8 key;

	if (nsegs > ia->ri_max_frwr_depth)
		nsegs = ia->ri_max_frwr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (ia->ri_mrtype == IB_MR_TYPE_SG_GAPS)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);
	mr->mr_nents = i;

	dma_nents = ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, mr->mr_nents,
				  mr->mr_dir);
	if (!dma_nents)
		goto out_dmamap_err;

	ibmr = mr->frwr.fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
	if (n != dma_nents)
		goto out_mapmr_err;

	ibmr->iova &= 0x00000000ffffffff;
	ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);
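
	/* The rkey's low-order key byte is advanced on each registration,
	 * and the upper half of the IOVA carries the RPC's XID. Both
	 * appear intended to make every use of this MR distinguishable
	 * from earlier uses: a stale handle from a previous registration
	 * no longer matches, and wire traces can be correlated with RPCs.
	 */
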
	reg_wr = &mr->frwr.fr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;
	trace_xprtrdma_mr_map(mr);

	return seg;

out_dmamap_err:
	mr->mr_dir = DMA_NONE;
	trace_xprtrdma_frwr_sgerr(mr, i);
	return ERR_PTR(-EIO);

out_mapmr_err:
	trace_xprtrdma_frwr_maperr(mr, n);
	return ERR_PTR(-EIO);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_fastreg(wc, frwr);
	/* The MR will get recycled when the associated req is retransmitted */
}

/**
 * frwr_send - post Send WR containing the RPC Call message
 * @ia: interface adapter
 * @req: Prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the result of ib_post_send.
 */
int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr;
	struct rpcrdma_mr *mr;

	post_wr = &req->rl_wr;
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		struct rpcrdma_frwr *frwr;

		frwr = &mr->frwr;

		frwr->fr_cqe.done = frwr_wc_fastreg;
		frwr->fr_regwr.wr.next = post_wr;
		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
		frwr->fr_regwr.wr.num_sge = 0;
		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
		frwr->fr_regwr.wr.send_flags = 0;

		post_wr = &frwr->fr_regwr.wr;
	}
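
	/* The chain handed to ib_post_send() below therefore begins with
	 * the most recently registered MR's REG_MR WR and ends with the
	 * Send WR carrying the RPC Call, so one post covers both
	 * registration and Send.
	 */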
	return ib_post_send(ia->ri_id->qp, post_wr, NULL);
}

/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 *
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			trace_xprtrdma_mr_remoteinv(mr);
			rpcrdma_mr_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

static void __frwr_release_mr(struct ib_wc *wc, struct rpcrdma_mr *mr)
{
	if (wc->status != IB_WC_SUCCESS)
		frwr_mr_recycle(mr);
	else
		rpcrdma_mr_put(mr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li(wc, frwr);
	__frwr_release_mr(wc, mr);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_wake(wc, frwr);
	__frwr_release_mr(wc, mr);
	complete(&frwr->fr_linv_done);
}

/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the previously mapped
 * memory regions. This guarantees that registered MRs are properly fenced
 * from the server before the RPC consumer accesses the data in them. It
 * also ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, **prev, *last;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {

		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}
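
	/* The loop above strings the LOCAL_INV WRs together through their
	 * ->next pointers: @first points to the WR for the first MR popped
	 * off @req, and @last to the WR for the final one.
	 */
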
	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr);

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so don't wait in that case.
	 */
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	trace_xprtrdma_post_linv(req, rc);
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		list_del_init(&mr->mr_list);
		frwr_mr_recycle(mr);
	}
}

/**
 * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
	struct rpcrdma_rep *rep = mr->mr_req->rl_reply;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_done(wc, frwr);
	__frwr_release_mr(wc, mr);

	/* Ensure @rep is generated before __frwr_release_mr */
	smp_rmb();
	rpcrdma_complete_rqst(rep);
}

/**
 * frwr_unmap_async - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * This guarantees that registered MRs are properly fenced from the
 * server before the RPC consumer accesses the data in them. It also
 * ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, *last, **prev;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {

		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete. The last completion will wake up the
	 * RPC waiter.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_done;

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	trace_xprtrdma_post_linv(req, rc);
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		frwr_mr_recycle(mr);
	}

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so wake here in that case.
	 */
	rpcrdma_complete_rqst(req->rl_reply);
}