/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Memory Regions (FMR).
 * Sometimes referred to as MTHCAFMR mode.
 *
 * FMR uses synchronous memory registration and deregistration.
 * FMR registration is known to be fast, but FMR deregistration
 * can take tens of usecs to complete.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using the
 * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
 * finished, the Memory Region is unmapped using the ib_unmap_fmr
 * verb (fmr_op_unmap).
 */
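
/* For illustration, the verb sequence underlying the two operations
 * is roughly the following ("fmr", "physaddrs", "npages" and "iova"
 * are placeholder names, not identifiers used in this file):
 *
 *	rc = ib_map_phys_fmr(fmr, physaddrs, npages, iova);
 *		(maps npages physical pages at iova; assigns a fresh rkey)
 *	... the remote peer performs RDMA READ or WRITE using that rkey ...
 *	LIST_HEAD(unmap_list);
 *	list_add(&fmr->list, &unmap_list);
 *	rc = ib_unmap_fmr(&unmap_list);
 *		(synchronously invalidates the mapping and its rkey)
 */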

/* Transport recovery
 *
 * After a transport reconnect, fmr_op_map re-uses the MR already
 * allocated for the RPC, but generates a fresh rkey then maps the
 * MR again. This process is synchronous.
 */

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES	(64)
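
/* With pages of PAGE_SHIFT size (see the fmr_attr in fmr_op_init
 * below), 64 SGEs let a single FMR cover 64 * PAGE_SIZE bytes of
 * payload, e.g. 256KB with the common 4KB page size.
 */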

static int
fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	    struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *devattr = &ia->ri_devattr;
	struct ib_mr *mr;

	/* Obtain an lkey to use for the regbufs, which are
	 * protected from remote access.
	 */
	if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
		ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
	} else {
		mr = ib_get_dma_mr(ia->ri_pd, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(mr)) {
			pr_err("%s: ib_get_dma_mr failed with %lX\n",
			       __func__, PTR_ERR(mr));
			return -ENOMEM;
		}
		ia->ri_dma_mr = mr;
		ia->ri_dma_lkey = ia->ri_dma_mr->lkey;
	}

	return 0;
}

/* FMR mode conveys up to 64 pages of payload per chunk segment.
 */
static size_t
fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     rpcrdma_max_segments(r_xprt) * RPCRDMA_MAX_FMR_SGES);
}
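
/* For example, assuming 4KB pages and this era's 1MB maximum payload
 * (RPCRDMA_MAX_DATA_SEGS = 1MB / PAGE_SIZE = 256 in xprt_rdma.h): even
 * a few chunk segments times 64 SGEs exceeds 256, so the min_t above
 * caps the result at 256 pages, i.e. 1MB of payload per RPC.
 */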

static int
fmr_op_init(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int mr_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;
	struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_FMR_SGES,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
	struct rpcrdma_mw *r;
	int i, rc;

	spin_lock_init(&buf->rb_mwlock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);

	i = max_t(int, RPCRDMA_MAX_DATA_SEGS / RPCRDMA_MAX_FMR_SGES, 1);
	i += 2;				/* head + tail */
	i *= buf->rb_max_requests;	/* one set for each RPC slot */
	dprintk("RPC:       %s: initializing %d FMRs\n", __func__, i);
	rc = -ENOMEM;
	while (i--) {
		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			goto out;

		r->r.fmr.physaddrs = kmalloc(RPCRDMA_MAX_FMR_SGES *
					     sizeof(u64), GFP_KERNEL);
		if (!r->r.fmr.physaddrs)
			goto out_free;

		r->r.fmr.fmr = ib_alloc_fmr(pd, mr_access_flags, &fmr_attr);
		if (IS_ERR(r->r.fmr.fmr))
			goto out_fmr_err;

		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
	}
	return 0;

out_fmr_err:
	rc = PTR_ERR(r->r.fmr.fmr);
	dprintk("RPC:       %s: ib_alloc_fmr status %i\n", __func__, rc);
	kfree(r->r.fmr.physaddrs);
out_free:
	kfree(r);
out:
	return rc;
}

static int
__fmr_unmap(struct rpcrdma_mw *r)
{
	LIST_HEAD(l);

	list_add(&r->r.fmr.fmr->list, &l);
	return ib_unmap_fmr(&l);
}
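
/* ib_unmap_fmr() is list-based so that callers can invalidate a batch
 * of FMRs in one device round trip; __fmr_unmap() passes a single-entry
 * list, trading that batching away for simplicity.
 */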

/* Use the ib_map_phys_fmr() verb to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	   int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct ib_device *device = ia->ri_device;
	enum dma_data_direction direction = rpcrdma_data_dir(writing);
	struct rpcrdma_mr_seg *seg1 = seg;
	int len, pageoff, i, rc;
	struct rpcrdma_mw *mw;

	mw = seg1->rl_mw;
	seg1->rl_mw = NULL;
	if (!mw) {
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return -ENOMEM;
	} else {
		/* this is a retransmit; generate a fresh rkey */
		rc = __fmr_unmap(mw);
		if (rc)
			return rc;
	}

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = 0;
	if (nsegs > RPCRDMA_MAX_FMR_SGES)
		nsegs = RPCRDMA_MAX_FMR_SGES;
	for (i = 0; i < nsegs;) {
		rpcrdma_map_one(device, seg, direction);
		mw->r.fmr.physaddrs[i] = seg->mr_dma;
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes: an FMR maps one contiguous range, so
		 * stop coalescing at the first segment that does not end
		 * (or start) on a page boundary.
		 */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}

	rc = ib_map_phys_fmr(mw->r.fmr.fmr, mw->r.fmr.physaddrs,
			     i, seg1->mr_dma);
	if (rc)
		goto out_maperr;

	seg1->rl_mw = mw;
	seg1->mr_rkey = mw->r.fmr.fmr->rkey;
	seg1->mr_base = seg1->mr_dma + pageoff;
	seg1->mr_nsegs = i;
	seg1->mr_len = len;
	return i;
193 dprintk("RPC: %s: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
194 __func__, len, (unsigned long long)seg1->mr_dma,
197 rpcrdma_unmap_one(device, --seg);
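
/* These entry points are reached through the ops table at the bottom
 * of this file; a call site sketch (assumed, based on this era's
 * rpcrdma_create_chunks() and rpcrdma_deregister_external()):
 *
 *	n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, writing);
 *	...
 *	nsegs = r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt, seg);
 */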

/* Use the ib_unmap_fmr() verb to prevent further remote
 * access via RDMA READ or RDMA WRITE.
 */
static int
fmr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_mw *mw = seg1->rl_mw;
	int rc, nsegs = seg->mr_nsegs;

	dprintk("RPC:       %s: FMR %p\n", __func__, mw);

	seg1->rl_mw = NULL;
	while (seg1->mr_nsegs--)
		rpcrdma_unmap_one(ia->ri_device, seg++);
	rc = __fmr_unmap(mw);
	if (rc)
		goto out_err;
	rpcrdma_put_mw(r_xprt, mw);
	return nsegs;

out_err:
	/* The FMR is abandoned, but remains in rb_all. fmr_op_destroy
	 * will attempt to release it when the transport is destroyed.
	 */
	dprintk("RPC:       %s: ib_unmap_fmr status %i\n", __func__, rc);
	return nsegs;
}

static void
fmr_op_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;
	int rc;

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		kfree(r->r.fmr.physaddrs);

		rc = ib_dealloc_fmr(r->r.fmr.fmr);
		if (rc)
			dprintk("RPC:       %s: ib_dealloc_fmr failed %i\n",
				__func__, rc);

		kfree(r);
	}
}

const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
	.ro_map				= fmr_op_map,
	.ro_unmap			= fmr_op_unmap,
	.ro_open			= fmr_op_open,
	.ro_maxpages			= fmr_op_maxpages,
	.ro_init			= fmr_op_init,
	.ro_destroy			= fmr_op_destroy,
	.ro_displayname			= "fmr",
};