// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR). Also sometimes referred to as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap_sync).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap_sync marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but whose LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */
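
/* For illustration, the MR state transitions in the normal-operation
 * flow described above are roughly:
 *
 *	FRMR_IS_INVALID --(frwr_op_map posts FAST_REG)--> FRMR_IS_VALID
 *	FRMR_IS_VALID --(frwr_op_unmap_sync posts LOCAL_INV)--> FRMR_IS_INVALID
 */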

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * four states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * FLUSHED_FR:	The MR was being registered when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * FLUSHED_LI:	The MR was being invalidated when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
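
/* Recovery action per state, as implemented by frwr_op_recover_mr
 * below:
 *
 *	INVALID:    MR is ready for re-use; no recovery is needed.
 *	VALID:      the MR is destroyed with ib_dereg_mr, replaced by a
 *		    fresh one, and its scatterlist is DMA unmapped.
 *	FLUSHED_FR: handled like VALID.
 *	FLUSHED_LI: handled like VALID, except the scatterlist is not
 *		    DMA unmapped again (note the state check in
 *		    frwr_op_recover_mr).
 */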

#include <linux/sunrpc/rpc_rdma.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

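/* frwr_is_supported - Check whether the underlying device can do FRWR.
 *
 * IB_DEVICE_MEM_MGT_EXTENSIONS advertises the fast-registration verbs
 * (ib_alloc_mr, IB_WR_REG_MR, IB_WR_LOCAL_INV) that this mode depends
 * on, and a zero max_fast_reg_page_list_len would mean no pages could
 * ever be registered.
 */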
bool
frwr_is_supported(struct rpcrdma_ia *ia)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		ia->ri_device->name);
	return false;
}

static int
frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
	unsigned int depth = ia->ri_max_frmr_depth;
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
	if (IS_ERR(f->fr_mr))
		goto out_mr_err;

	r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);
	if (!r->mw_sg)
		goto out_list_err;

	sg_init_table(r->mw_sg, depth);
	init_completion(&f->fr_linv_done);
	return 0;

out_mr_err:
	rc = PTR_ERR(f->fr_mr);
	dprintk("RPC:       %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC:       %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(f->fr_mr);
	return rc;
}

static void
frwr_op_release_mr(struct rpcrdma_mw *r)
{
	int rc;

	/* Ensure MW is not on any rl_registered list */
	if (!list_empty(&r->mw_list))
		list_del(&r->mw_list);

	rc = ib_dereg_mr(r->frmr.fr_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
		       r, rc);
	kfree(r->mw_sg);
	kfree(r);
}

static int
__frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	rc = ib_dereg_mr(f->fr_mr);
	if (rc) {
		pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
			rc, r);
		return rc;
	}

	f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
			       ia->ri_max_frmr_depth);
	if (IS_ERR(f->fr_mr)) {
		pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
			PTR_ERR(f->fr_mr), r);
		return PTR_ERR(f->fr_mr);
	}

	dprintk("RPC:       %s: recovered FRMR %p\n", __func__, f);
	f->fr_state = FRMR_IS_INVALID;
	return 0;
}

/* Reset of a single FRMR. Generate a fresh rkey by replacing the MR.
 */
static void
frwr_op_recover_mr(struct rpcrdma_mw *mw)
{
	enum rpcrdma_frmr_state state = mw->frmr.fr_state;
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int rc;

	rc = __frwr_reset_mr(ia, mw);
	if (state != FRMR_FLUSHED_LI)
		ib_dma_unmap_sg(ia->ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (rc)
		goto out_release;

	rpcrdma_put_mw(r_xprt, mw);
	r_xprt->rx_stats.mrs_recovered++;
	return;

out_release:
	pr_err("rpcrdma: FRMR reset failed %d, %p released\n", rc, mw);
	r_xprt->rx_stats.mrs_orphaned++;

	spin_lock(&r_xprt->rx_buf.rb_mwlock);
	list_del(&mw->mw_all);
	spin_unlock(&r_xprt->rx_buf.rb_mwlock);

	frwr_op_release_mr(mw);
}

static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;
	int depth, delta;

	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

	ia->ri_max_frmr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      attrs->max_fast_reg_page_list_len);
	dprintk("RPC:       %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frmr_depth);

	/* Add room for frmr register and invalidate WRs.
	 * 1. FRMR reg WR for head
	 * 2. FRMR invalidate WR for head
	 * 3. N FRMR reg WRs for pagelist
	 * 4. N FRMR invalidate WRs for pagelist
	 * 5. FRMR reg WR for tail
	 * 6. FRMR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRMR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
		do {
			depth += 2; /* FRMR reg + invalidate */
			delta -= ia->ri_max_frmr_depth;
		} while (delta > 0);
	}
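	/* Worked example with hypothetical limits: if RPCRDMA_MAX_DATA_SEGS
	 * were 64 and the device's ri_max_frmr_depth were 24, delta would
	 * start at 40 and the loop would run twice (40 -> 16 -> -8),
	 * leaving depth = 7 + 4 = 11 WRs per RPC.
	 */
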
	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > attrs->max_qp_wr) {
		cdata->max_requests = attrs->max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				ia->ri_max_frmr_depth);
	return 0;
}

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);
}

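/* Report send completion errors. WRs that were merely flushed
 * (IB_WC_WR_FLUSH_ERR) are expected whenever the QP enters ERROR
 * state, e.g. at transport disconnect, so they are not logged.
 */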
static void
__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
		       wr, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		frmr->fr_state = FRMR_FLUSHED_FR;
		__frwr_sendcompletion_flush(wc, "fastreg");
	}
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a flushed LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		frmr->fr_state = FRMR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a signaled LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	cqe = wc->wr_cqe;
	frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
	if (wc->status != IB_WC_SUCCESS) {
		frmr->fr_state = FRMR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	complete(&frmr->fr_linv_done);
}

/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static struct rpcrdma_mr_seg *
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing, struct rpcrdma_mw **out)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
	struct rpcrdma_mw *mw;
	struct rpcrdma_frmr *frmr;
	struct ib_mr *mr;
	struct ib_reg_wr *reg_wr;
	struct ib_send_wr *bad_wr;
	int rc, i, n;
	u8 key;

	mw = NULL;
	do {
		if (mw)
			rpcrdma_defer_mr_recovery(mw);
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return ERR_PTR(-ENOBUFS);
	} while (mw->frmr.fr_state != FRMR_IS_INVALID);
	frmr = &mw->frmr;
	frmr->fr_state = FRMR_IS_VALID;
	mr = frmr->fr_mr;
	reg_wr = &frmr->fr_regwr;

	if (nsegs > ia->ri_max_frmr_depth)
		nsegs = ia->ri_max_frmr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mw->mw_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (holes_ok)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mw->mw_dir = rpcrdma_data_dir(writing);

	mw->mw_nents = ib_dma_map_sg(ia->ri_device, mw->mw_sg, i, mw->mw_dir);
	if (!mw->mw_nents)
		goto out_dmamap_err;

	n = ib_map_mr_sg(mr, mw->mw_sg, mw->mw_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mw->mw_nents))
		goto out_mapmr_err;

	dprintk("RPC:       %s: Using frmr %p to map %u segments (%llu bytes)\n",
		__func__, frmr, mw->mw_nents, mr->length);

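	/* Bump the low-order eight bits of the rkey (the consumer-owned
	 * "key" portion on most HCAs; the upper bits index the MR) so
	 * that a stale rkey from a previous registration of this MR
	 * cannot be used by the peer.
	 */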
	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);

	reg_wr->wr.next = NULL;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	frmr->fr_cqe.done = frwr_wc_fastreg;
	reg_wr->wr.wr_cqe = &frmr->fr_cqe;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = 0;
	reg_wr->mr = mr;
	reg_wr->key = mr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	rpcrdma_set_signaled(&r_xprt->rx_ep, &reg_wr->wr);
	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
	if (rc)
		goto out_senderr;

	mw->mw_handle = mr->rkey;
	mw->mw_length = mr->length;
	mw->mw_offset = mr->iova;

	*out = mw;
	return seg;

out_dmamap_err:
	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
	       mw->mw_sg, i);
	frmr->fr_state = FRMR_IS_INVALID;
	rpcrdma_put_mw(r_xprt, mw);
	return ERR_PTR(-EIO);

out_mapmr_err:
	pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
	       frmr->fr_mr, n, mw->mw_nents);
	rpcrdma_defer_mr_recovery(mw);
	return ERR_PTR(-EIO);

out_senderr:
	pr_err("rpcrdma: FRMR registration ib_post_send returned %i\n", rc);
	rpcrdma_defer_mr_recovery(mw);
	return ERR_PTR(-ENOTCONN);
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mws is not empty before the call. This
 * function empties the list.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
{
	struct ib_send_wr *first, **prev, *last, *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_frmr *f;
	struct rpcrdma_mw *mw;
	int count, rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	f = NULL;
	count = 0;
	prev = &first;
	list_for_each_entry(mw, mws, mw_list) {
		mw->frmr.fr_state = FRMR_IS_INVALID;

		if (mw->mw_flags & RPCRDMA_MW_F_RI)
			continue;

		f = &mw->frmr;
		dprintk("RPC:       %s: invalidating frmr %p\n",
			__func__, f);

		f->fr_cqe.done = frwr_wc_localinv;
		last = &f->fr_invwr;
		memset(last, 0, sizeof(*last));
		last->wr_cqe = &f->fr_cqe;
		last->opcode = IB_WR_LOCAL_INV;
		last->ex.invalidate_rkey = mw->mw_handle;
		count++;

		*prev = last;
		prev = &last->next;
	}
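
	/* The chain now looks like (sketch):
	 *
	 *	first -> LOCAL_INV(mw 1) -> LOCAL_INV(mw 2) -> ... -> last
	 *
	 * Only "last" is signaled, below. MWs flagged RPCRDMA_MW_F_RI
	 * were skipped: the server already invalidated those remotely.
	 */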
	if (!f)
		goto unmap;

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	last->send_flags = IB_SEND_SIGNALED;
	f->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&f->fr_linv_done);

	/* Initialize CQ count, since there is always a signaled
	 * WR being posted here. The new cqcount depends on how
	 * many SQEs are about to be consumed.
	 */
	rpcrdma_init_cqcount(&r_xprt->rx_ep, count);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	r_xprt->rx_stats.local_inv_needed++;
	bad_wr = NULL;
	rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
	if (bad_wr != first)
		wait_for_completion(&f->fr_linv_done);
	if (rc)
		goto reset_mrs;

	/* ORDER: Now DMA unmap all of the MRs, and return
	 * them to the free MW list.
	 */
unmap:
	while (!list_empty(mws)) {
		mw = rpcrdma_pop_mw(mws);
		dprintk("RPC:       %s: DMA unmapping frmr %p\n",
			__func__, &mw->frmr);
		ib_dma_unmap_sg(ia->ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
		rpcrdma_put_mw(r_xprt, mw);
	}
	return;

reset_mrs:
	pr_err("rpcrdma: FRMR invalidate ib_post_send returned %i\n", rc);

	/* Find and reset the MRs in the LOCAL_INV WRs that did not
	 * get posted.
	 */
	rpcrdma_init_cqcount(&r_xprt->rx_ep, -count);
	while (bad_wr) {
		f = container_of(bad_wr, struct rpcrdma_frmr,
				 fr_invwr);
		mw = container_of(f, struct rpcrdma_mw, frmr);

		__frwr_reset_mr(ia, mw);

		bad_wr = bad_wr->next;
	}
	goto unmap;
}

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
static void
frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		   bool sync)
{
	struct rpcrdma_mw *mw;

	while (!list_empty(&req->rl_registered)) {
		mw = rpcrdma_pop_mw(&req->rl_registered);
		if (sync)
			frwr_op_recover_mr(mw);
		else
			rpcrdma_defer_mr_recovery(mw);
	}
}

a0ce85f5 | 580 | const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = { |
9c1b4d77 | 581 | .ro_map = frwr_op_map, |
c9918ff5 | 582 | .ro_unmap_sync = frwr_op_unmap_sync, |
ead3f26e | 583 | .ro_unmap_safe = frwr_op_unmap_safe, |
505bbe64 | 584 | .ro_recover_mr = frwr_op_recover_mr, |
3968cb58 | 585 | .ro_open = frwr_op_open, |
1c9351ee | 586 | .ro_maxpages = frwr_op_maxpages, |
e2ac236c CL |
587 | .ro_init_mr = frwr_op_init_mr, |
588 | .ro_release_mr = frwr_op_release_mr, | |
a0ce85f5 | 589 | .ro_displayname = "frwr", |
c8b920bb | 590 | .ro_send_w_inv_ok = RPCRDMA_CMP_F_SND_W_INV_OK, |
a0ce85f5 | 591 | }; |