// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
static int rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt, bool temp);
static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb);

struct workqueue_struct *rpcrdma_receive_wq __read_mostly;

int
rpcrdma_alloc_wq(void)
{
	struct workqueue_struct *recv_wq;

	recv_wq = alloc_workqueue("xprtrdma_receive",
				  WQ_MEM_RECLAIM | WQ_HIGHPRI,
				  0);
	if (!recv_wq)
		return -ENOMEM;

	rpcrdma_receive_wq = recv_wq;
	return 0;
}

void
rpcrdma_destroy_wq(void)
{
	struct workqueue_struct *wq;

	if (rpcrdma_receive_wq) {
		wq = rpcrdma_receive_wq;
		rpcrdma_receive_wq = NULL;
		destroy_workqueue(wq);
	}
}

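/* Usage sketch for the workqueue helpers above. This is illustrative
 * only: the callers are assumed to be the transport module's init and
 * exit paths, which live outside this file.
 *
 *	if (rpcrdma_alloc_wq())
 *		return -ENOMEM;
 *	...
 *	rpcrdma_destroy_wq();
 */
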
static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;
	struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
						   rx_ep);

	trace_xprtrdma_qp_error(r_xprt, event);
	pr_err("rpcrdma: %s on device %s ep %p\n",
	       ib_event_msg(event->event), event->device->name, context);

	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
	}
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_sendctx *sc =
		container_of(cqe, struct rpcrdma_sendctx, sc_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_send(sc, wc);
	if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);

	rpcrdma_sendctx_put_locked(sc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_receive(wc);
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
				   rdmab_addr(rep->rr_rdmabuf),
				   wc->byte_len, DMA_FROM_DEVICE);

out_schedule:
	rpcrdma_reply_handler(rep);
	return;

out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, 0);
	goto out_schedule;
}

static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
			       struct rdma_conn_param *param)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		r_xprt->rx_ia.ri_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

	if (rsize < cdata->inline_rsize)
		cdata->inline_rsize = rsize;
	if (wsize < cdata->inline_wsize)
		cdata->inline_wsize = wsize;
	dprintk("RPC:       %s: max send %u, max recv %u\n",
		__func__, cdata->inline_wsize, cdata->inline_rsize);
	rpcrdma_set_max_header_sizes(r_xprt);
}

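/* Worked example for the negotiation above (values are illustrative):
 * if the local defaults are inline_rsize = inline_wsize = 4096 and the
 * peer's private message advertises cp_send_size = 1024 and
 * cp_recv_size = 2048, then inline_rsize is clamped to 1024 and
 * inline_wsize to 2048. Without a valid private message, both are
 * clamped to RPCRDMA_V1_DEF_INLINE_SIZE.
 */
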
static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
	int connstate = 0;

	trace_xprtrdma_conn_upcall(xprt, event);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EPROTO;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
		pr_info("rpcrdma: removing device %s for %s:%s\n",
			ia->ri_device->name,
			rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt));
#endif
		set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
		ep->rep_connected = -ENODEV;
		xprt_force_disconnect(&xprt->rx_xprt);
		wait_for_completion(&ia->ri_remove_done);

		ia->ri_id = NULL;
		ia->ri_device = NULL;
		/* Return 1 to ensure the core destroys the id. */
		return 1;
	case RDMA_CM_EVENT_ESTABLISHED:
		++xprt->rx_xprt.connect_cookie;
		connstate = 1;
		rpcrdma_update_connect_private(xprt, &event->param.conn);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETUNREACH;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
			rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
			rdma_reject_msg(id, event->status));
		connstate = -ECONNREFUSED;
		if (event->status == IB_CM_REJ_STALE_CONN)
			connstate = -EAGAIN;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		++xprt->rx_xprt.connect_cookie;
		connstate = -ECONNABORTED;
connected:
		xprt->rx_buf.rb_credits = 1;
		ep->rep_connected = connstate;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		/*FALLTHROUGH*/
	default:
		dprintk("RPC:       %s: %s:%s on %s/%s (ep 0x%p): %s\n",
			__func__,
			rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
			ia->ri_device->name, ia->ri_ops->ro_displayname,
			ep, rdma_event_msg(event->event));
		break;
	}

	return 0;
}

static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rdma_cm_id *id;
	int rc;

	trace_xprtrdma_conn_start(xprt);

	init_completion(&ia->ri_done);
	init_completion(&ia->ri_remove_done);

	id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_conn_upcall,
			    xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC:       %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL,
			       (struct sockaddr *)&xprt->rx_xprt.addr,
			       RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}

	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

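/* Note on the flow above: rdma_resolve_addr() and rdma_resolve_route()
 * complete asynchronously. Each result arrives via rpcrdma_conn_upcall(),
 * which stores it in ia->ri_async_rc and signals ia->ri_done; the
 * interruptible waits bound each step by RDMA_RESOLVE_TIMEOUT (plus the
 * one jiffy of slack built into wtimeout).
 */
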
/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: transport with IA to (re)initialize
 *
 * Returns 0 on success, negative errno if an appropriate
 * Interface Adapter could not be found and opened.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_id = rpcrdma_create_id(xprt, ia);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out_err;
	}
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device, 0);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
		goto out_err;
	}

	switch (xprt_rdma_memreg_strategy) {
	case RPCRDMA_FRWR:
		if (frwr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_frwr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	case RPCRDMA_MTHCAFMR:
		if (fmr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_fmr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	default:
		pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
		       ia->ri_device->name, xprt_rdma_memreg_strategy);
		rc = -EINVAL;
		goto out_err;
	}

	return 0;

out_err:
	rpcrdma_ia_close(ia);
	return rc;
}

/**
 * rpcrdma_ia_remove - Handle device driver unload
 * @ia: interface adapter being removed
 *
 * Divest transport H/W resources associated with this adapter,
 * but allow it to be restored later.
 */
void
rpcrdma_ia_remove(struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpcrdma_rep *rep;

	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	/* This is similar to rpcrdma_ep_destroy, but:
	 * - Don't cancel the connect worker.
	 * - Don't call rpcrdma_ep_disconnect, which waits
	 *   for another conn upcall, which will deadlock.
	 * - rdma_disconnect is unneeded, the underlying
	 *   connection is already gone.
	 */
	if (ia->ri_id->qp) {
		ib_drain_qp(ia->ri_id->qp);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}
	ib_free_cq(ep->rep_attr.recv_cq);
	ep->rep_attr.recv_cq = NULL;
	ib_free_cq(ep->rep_attr.send_cq);
	ep->rep_attr.send_cq = NULL;

	/* The ULP is responsible for ensuring all DMA
	 * mappings and MRs are gone.
	 */
	list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list)
		rpcrdma_dma_unmap_regbuf(rep->rr_rdmabuf);
	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
		rpcrdma_dma_unmap_regbuf(req->rl_rdmabuf);
		rpcrdma_dma_unmap_regbuf(req->rl_sendbuf);
		rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
	}
	rpcrdma_mrs_destroy(buf);
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;

	/* Allow waiters to continue */
	complete(&ia->ri_remove_done);

	trace_xprtrdma_remove(r_xprt);
}

/**
 * rpcrdma_ia_close - Clean up/close an IA.
 * @ia: interface adapter to close
 *
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
	}
	ia->ri_id = NULL;
	ia->ri_device = NULL;

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
		  struct rpcrdma_create_data_internal *cdata)
{
	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
	struct ib_cq *sendcq, *recvcq;
	unsigned int max_sge;
	int rc;

	max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ia->ri_max_send_sges = max_sge;

	rc = ia->ri_ops->ro_open(ia, ep, cdata);
	if (rc)
		return rc;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_sge = max_sge;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH,
				   cdata->max_requests >> 2);
	ep->rep_send_count = ep->rep_send_batch;
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	sendcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_send_wr + 1,
			     1, IB_POLL_WORKQUEUE);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC:       %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	recvcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_recv_wr + 1,
			     0, IB_POLL_WORKQUEUE);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC:       %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */
	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= ia->ri_ops->ro_send_w_inv_ok;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(cdata->inline_wsize);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(cdata->inline_rsize);
	ep->rep_remote_cma.private_data = pmsg;
	ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	ep->rep_remote_cma.responder_resources =
		min_t(int, U8_MAX, ia->ri_device->attrs.max_qp_rd_atom);

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->rep_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	ib_free_cq(sendcq);
out1:
	return rc;
}

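/* Worked example for the send-completion trigger above: with
 * cdata->max_requests == 128 (an illustrative figure), rep_send_batch
 * is min(RPCRDMA_MAX_SEND_BATCH, 128 >> 2). After that many unsignaled
 * Sends, rpcrdma_ep_post() marks the next Send IB_SEND_SIGNALED so the
 * completion handler can retire the whole batch of send contexts.
 */
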
/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id && ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	if (ep->rep_attr.recv_cq)
		ib_free_cq(ep->rep_attr.recv_cq);
	if (ep->rep_attr.send_cq)
		ib_free_cq(ep->rep_attr.send_cq);
}

/* Re-establish a connection after a device removal event.
 * Unlike a normal reconnection, a fresh PD and a new set
 * of MRs and buffers is needed.
 */
static int
rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
			 struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc, err;

	trace_xprtrdma_reinsert(r_xprt);

	rc = -EHOSTUNREACH;
	if (rpcrdma_ia_open(r_xprt))
		goto out1;

	rc = -ENOMEM;
	err = rpcrdma_ep_create(ep, ia, &r_xprt->rx_data);
	if (err) {
		pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
		goto out2;
	}

	rc = -ENETUNREACH;
	err = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
		goto out3;
	}

	rpcrdma_mrs_create(r_xprt);
	return 0;

out3:
	rpcrdma_ep_destroy(ep, ia);
out2:
	rpcrdma_ia_close(ia);
out1:
	return rc;
}

static int
rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
		     struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int err, rc;

	trace_xprtrdma_reconnect(r_xprt);

	rpcrdma_ep_disconnect(ep, ia);

	rc = -EHOSTUNREACH;
	id = rpcrdma_create_id(r_xprt, ia);
	if (IS_ERR(id))
		goto out;

	/* As long as the new ID points to the same device as the
	 * old ID, we can reuse the transport's existing PD and all
	 * previously allocated MRs. Also, the same device means
	 * the transport's previous DMA mappings are still valid.
	 *
	 * This is a sanity check only. There should be no way these
	 * point to two different devices here.
	 */
	old = id;
	rc = -ENETUNREACH;
	if (ia->ri_device != id->device) {
		pr_err("rpcrdma: can't reconnect on different device!\n");
		goto out_destroy;
	}

	err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		dprintk("RPC:       %s: rdma_create_qp returned %d\n",
			__func__, err);
		goto out_destroy;
	}

	/* Atomically replace the transport's ID and QP. */
	rc = 0;
	old = ia->ri_id;
	ia->ri_id = id;
	rdma_destroy_qp(old);

out_destroy:
	rdma_destroy_id(old);
out:
	return rc;
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	int rc;

retry:
	switch (ep->rep_connected) {
	case 0:
		dprintk("RPC:       %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rc = -ENETUNREACH;
			goto out_noupdate;
		}
		break;
	case -ENODEV:
		rc = rpcrdma_ep_recreate_xprt(r_xprt, ep, ia);
		if (rc)
			goto out_noupdate;
		break;
	default:
		rc = rpcrdma_ep_reconnect(r_xprt, ep, ia);
		if (rc)
			goto out;
	}

	ep->rep_connected = 0;

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC:       %s: rdma_connect() failed with %i\n",
			__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
	if (ep->rep_connected <= 0) {
		if (ep->rep_connected == -EAGAIN)
			goto retry;
		rc = ep->rep_connected;
		goto out;
	}

	dprintk("RPC:       %s: connected\n", __func__);

	rpcrdma_post_recvs(r_xprt, true);

out:
	if (rc)
		ep->rep_connected = rc;

out_noupdate:
	return rc;
}

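/* Summary of the connect paths above: rep_connected == 0 is a first
 * connect (create a fresh QP); -ENODEV means the underlying device was
 * removed (rebuild IA, EP, QP, and MRs via rpcrdma_ep_recreate_xprt());
 * any other value is an ordinary reconnect on the same device via
 * rpcrdma_ep_reconnect(). A stale-connection rejection surfaces as
 * -EAGAIN and simply retries.
 */
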
/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rc = rdma_disconnect(ia->ri_id);
	if (!rc)
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
					 ep->rep_connected != 1);
	else
		ep->rep_connected = rc;
	trace_xprtrdma_disconnect(container_of(ep, struct rpcrdma_xprt,
					       rx_ep), rc);

	ib_drain_qp(ia->ri_id->qp);
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and ib_drain_qp has flushed all remaining Send
 * requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
{
	unsigned long i;

	for (i = 0; i <= buf->rb_sc_last; i++)
		kfree(buf->rb_sc_ctxs[i]);
	kfree(buf->rb_sc_ctxs);
}

static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
{
	struct rpcrdma_sendctx *sc;

	sc = kzalloc(sizeof(*sc) +
		     ia->ri_max_send_sges * sizeof(struct ib_sge),
		     GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->sc_wr.wr_cqe = &sc->sc_cqe;
	sc->sc_wr.sg_list = sc->sc_sges;
	sc->sc_wr.opcode = IB_WR_SEND;
	sc->sc_cqe.done = rpcrdma_wc_send;
	return sc;
}

static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long i;

	/* Maximum number of concurrent outstanding Send WRs. Capping
	 * the circular queue size stops Send Queue overflow by causing
	 * the ->send_request call to fail temporarily before too many
	 * Sends are posted.
	 */
	i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS;
	dprintk("RPC:       %s: allocating %lu send_ctxs\n", __func__, i);
	buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
	if (!buf->rb_sc_ctxs)
		return -ENOMEM;

	buf->rb_sc_last = i - 1;
	for (i = 0; i <= buf->rb_sc_last; i++) {
		sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
		if (!sc)
			goto out_destroy;

		sc->sc_xprt = r_xprt;
		buf->rb_sc_ctxs[i] = sc;
	}
	buf->rb_flags = 0;

	return 0;

out_destroy:
	rpcrdma_sendctxs_destroy(buf);
	return -ENOMEM;
}

/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
					  unsigned long item)
{
	return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}

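/* Worked example: with rb_sc_last == 3 (a four-slot ring), successive
 * results are 0 -> 1 -> 2 -> 3 -> 0. One slot is always left unused so
 * that a full ring (next_head == rb_sc_tail) can be told apart from an
 * empty one, so the ring holds one fewer usable sendctx than it has
 * slots.
 */
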
/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @buf: transport buffers from which to acquire an unused context
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer),
 * and provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt;
	struct rpcrdma_sendctx *sc;
	unsigned long next_head;

	next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

	if (next_head == READ_ONCE(buf->rb_sc_tail))
		goto out_emptyq;

	/* ORDER: item must be accessed _before_ head is updated */
	sc = buf->rb_sc_ctxs[next_head];

	/* Releasing the lock in the caller acts as a memory
	 * barrier that flushes rb_sc_head.
	 */
	buf->rb_sc_head = next_head;

	return sc;

out_emptyq:
	/* The queue is "empty" if there have not been enough Send
	 * completions recently. This is a sign the Send Queue is
	 * backing up. Cause the caller to pause and try again.
	 */
	set_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags);
	r_xprt = container_of(buf, struct rpcrdma_xprt, rx_buf);
	r_xprt->rx_stats.empty_sendctx_q++;
	return NULL;
}

/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctx
 * to the queue.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer).
 */
static void
rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_buffer *buf = &sc->sc_xprt->rx_buf;
	unsigned long next_tail;

	/* Unmap SGEs of previously completed but unsignaled
	 * Sends by walking up the queue until @sc is found.
	 */
	next_tail = buf->rb_sc_tail;
	do {
		next_tail = rpcrdma_sendctx_next(buf, next_tail);

		/* ORDER: item must be accessed _before_ tail is updated */
		rpcrdma_unmap_sendctx(buf->rb_sc_ctxs[next_tail]);

	} while (buf->rb_sc_ctxs[next_tail] != sc);

	/* Paired with READ_ONCE */
	smp_store_release(&buf->rb_sc_tail, next_tail);

	if (test_and_clear_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags)) {
		smp_mb__after_atomic();
		xprt_write_space(&sc->sc_xprt->rx_xprt);
	}
}

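/* Example of the tail walk in rpcrdma_sendctx_put_locked(): if
 * rb_sc_tail is 2 and a signaled Send completes for the sendctx in
 * slot 5, slots 3 and 4 (whose Sends were posted unsignaled and thus
 * never generate completions of their own) are unmapped along the way,
 * and the tail is released at slot 5.
 */
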
static void
rpcrdma_mr_recovery_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_recovery_worker.work);
	struct rpcrdma_mr *mr;

	spin_lock(&buf->rb_recovery_lock);
	while (!list_empty(&buf->rb_stale_mrs)) {
		mr = rpcrdma_mr_pop(&buf->rb_stale_mrs);
		spin_unlock(&buf->rb_recovery_lock);

		trace_xprtrdma_recover_mr(mr);
		mr->mr_xprt->rx_ia.ri_ops->ro_recover_mr(mr);

		spin_lock(&buf->rb_recovery_lock);
	}
	spin_unlock(&buf->rb_recovery_lock);
}

void
rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

	spin_lock(&buf->rb_recovery_lock);
	rpcrdma_mr_push(mr, &buf->rb_stale_mrs);
	spin_unlock(&buf->rb_recovery_lock);

	schedule_delayed_work(&buf->rb_recovery_worker, 0);
}

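/* Recovery flow, for illustration: when a registration mode finds an
 * MR in an indeterminate state, it calls rpcrdma_mr_defer_recovery()
 * to park the MR on rb_stale_mrs; rb_recovery_worker later hands each
 * one to ro_recover_mr(), which resets or releases it off the hot path.
 */
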
static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int count;
	LIST_HEAD(free);
	LIST_HEAD(all);

	for (count = 0; count < 3; count++) {
		struct rpcrdma_mr *mr;
		int rc;

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr)
			break;

		rc = ia->ri_ops->ro_init_mr(ia, mr);
		if (rc) {
			kfree(mr);
			break;
		}

		mr->mr_xprt = r_xprt;

		list_add(&mr->mr_list, &free);
		list_add(&mr->mr_all, &all);
	}

	spin_lock(&buf->rb_mrlock);
	list_splice(&free, &buf->rb_mrs);
	list_splice(&all, &buf->rb_all);
	r_xprt->rx_stats.mrs_allocated += count;
	spin_unlock(&buf->rb_mrlock);
	trace_xprtrdma_createmrs(r_xprt, count);

	xprt_write_space(&r_xprt->rx_xprt);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_refresh_worker.work);
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);

	rpcrdma_mrs_create(r_xprt);
}

struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE,
				  DMA_TO_DEVICE, GFP_KERNEL);
	if (IS_ERR(rb)) {
		kfree(req);
		return ERR_PTR(-ENOMEM);
	}
	req->rl_rdmabuf = rb;
	xdr_buf_init(&req->rl_hdrbuf, rb->rg_base, rdmab_length(rb));
	req->rl_buffer = buffer;
	INIT_LIST_HEAD(&req->rl_registered);

	spin_lock(&buffer->rb_reqslock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_reqslock);
	return req;
}

static int
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt, bool temp)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(cdata->inline_rsize,
					       DMA_FROM_DEVICE, GFP_KERNEL);
	if (IS_ERR(rep->rr_rdmabuf)) {
		rc = PTR_ERR(rep->rr_rdmabuf);
		goto out_free;
	}
	xdr_buf_init(&rep->rr_hdrbuf, rep->rr_rdmabuf->rg_base,
		     rdmab_length(rep->rr_rdmabuf));

	rep->rr_cqe.done = rpcrdma_wc_receive;
	rep->rr_rxprt = r_xprt;
	INIT_WORK(&rep->rr_work, rpcrdma_deferred_completion);
	rep->rr_recv_wr.next = NULL;
	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	rep->rr_recv_wr.num_sge = 1;
	rep->rr_temp = temp;

	spin_lock(&buf->rb_lock);
	list_add(&rep->rr_list, &buf->rb_recv_bufs);
	spin_unlock(&buf->rb_lock);
	return 0;

out_free:
	kfree(rep);
out:
	dprintk("RPC:       %s: reply buffer %d alloc failed\n",
		__func__, rc);
	return rc;
}

int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_max_requests = r_xprt->rx_data.max_requests;
	buf->rb_bc_srv_max_requests = 0;
	spin_lock_init(&buf->rb_mrlock);
	spin_lock_init(&buf->rb_lock);
	spin_lock_init(&buf->rb_recovery_lock);
	INIT_LIST_HEAD(&buf->rb_mrs);
	INIT_LIST_HEAD(&buf->rb_all);
	INIT_LIST_HEAD(&buf->rb_stale_mrs);
	INIT_DELAYED_WORK(&buf->rb_refresh_worker,
			  rpcrdma_mr_refresh_worker);
	INIT_DELAYED_WORK(&buf->rb_recovery_worker,
			  rpcrdma_mr_recovery_worker);

	rpcrdma_mrs_create(r_xprt);

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);
	spin_lock_init(&buf->rb_reqslock);
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req)) {
			dprintk("RPC:       %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = PTR_ERR(req);
			goto out;
		}
		list_add(&req->rl_list, &buf->rb_send_bufs);
	}

	buf->rb_posted_receives = 0;
	INIT_LIST_HEAD(&buf->rb_recv_bufs);

	rc = rpcrdma_sendctxs_create(r_xprt);
	if (rc)
		goto out;

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

static void
rpcrdma_destroy_rep(struct rpcrdma_rep *rep)
{
	rpcrdma_free_regbuf(rep->rr_rdmabuf);
	kfree(rep);
}

void
rpcrdma_destroy_req(struct rpcrdma_req *req)
{
	rpcrdma_free_regbuf(req->rl_recvbuf);
	rpcrdma_free_regbuf(req->rl_sendbuf);
	rpcrdma_free_regbuf(req->rl_rdmabuf);
	kfree(req);
}

static void
rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	struct rpcrdma_mr *mr;
	unsigned int count;

	count = 0;
	spin_lock(&buf->rb_mrlock);
	while (!list_empty(&buf->rb_all)) {
		mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all);
		list_del(&mr->mr_all);

		spin_unlock(&buf->rb_mrlock);

		/* Ensure MR is not on any rl_registered list */
		if (!list_empty(&mr->mr_list))
			list_del(&mr->mr_list);

		ia->ri_ops->ro_release_mr(mr);
		count++;
		spin_lock(&buf->rb_mrlock);
	}
	spin_unlock(&buf->rb_mrlock);
	r_xprt->rx_stats.mrs_allocated = 0;

	dprintk("RPC:       %s: released %u MRs\n", __func__, count);
}

void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	cancel_delayed_work_sync(&buf->rb_recovery_worker);
	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	rpcrdma_sendctxs_destroy(buf);

	while (!list_empty(&buf->rb_recv_bufs)) {
		struct rpcrdma_rep *rep;

		rep = list_first_entry(&buf->rb_recv_bufs,
				       struct rpcrdma_rep, rr_list);
		list_del(&rep->rr_list);
		rpcrdma_destroy_rep(rep);
	}

	spin_lock(&buf->rb_reqslock);
	while (!list_empty(&buf->rb_allreqs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_allreqs,
				       struct rpcrdma_req, rl_all);
		list_del(&req->rl_all);

		spin_unlock(&buf->rb_reqslock);
		rpcrdma_destroy_req(req);
		spin_lock(&buf->rb_reqslock);
	}
	spin_unlock(&buf->rb_reqslock);

	rpcrdma_mrs_destroy(buf);
}

/**
 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
 * @r_xprt: controlling transport
 *
 * Returns an initialized rpcrdma_mr or NULL if no free
 * rpcrdma_mr objects are available.
 */
struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mr *mr = NULL;

	spin_lock(&buf->rb_mrlock);
	if (!list_empty(&buf->rb_mrs))
		mr = rpcrdma_mr_pop(&buf->rb_mrs);
	spin_unlock(&buf->rb_mrlock);

	if (!mr)
		goto out_nomrs;
	return mr;

out_nomrs:
	trace_xprtrdma_nomrs(r_xprt);
	if (r_xprt->rx_ep.rep_connected != -ENODEV)
		schedule_delayed_work(&buf->rb_refresh_worker, 0);

	/* Allow the reply handler and refresh worker to run */
	cond_resched();

	return NULL;
}

static void
__rpcrdma_mr_put(struct rpcrdma_buffer *buf, struct rpcrdma_mr *mr)
{
	spin_lock(&buf->rb_mrlock);
	rpcrdma_mr_push(mr, &buf->rb_mrs);
	spin_unlock(&buf->rb_mrlock);
}

/**
 * rpcrdma_mr_put - Release an rpcrdma_mr object
 * @mr: object to release
 *
 */
void
rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
	__rpcrdma_mr_put(&mr->mr_xprt->rx_buf, mr);
}

/**
 * rpcrdma_mr_unmap_and_put - DMA unmap an MR and release it
 * @mr: object to release
 *
 */
void
rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_dma_unmap(mr);
	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mr->mr_sg, mr->mr_nents, mr->mr_dir);
	__rpcrdma_mr_put(&r_xprt->rx_buf, mr);
}

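/* Typical MR lifecycle, for illustration (the ro_map step belongs to
 * the registration mode, not this file):
 *
 *	mr = rpcrdma_mr_get(r_xprt);	// NULL schedules a refresh
 *	... ro_map() registers and DMA-maps mr->mr_sg ...
 *	rpcrdma_mr_unmap_and_put(mr);	// after invalidation completes
 *
 * rpcrdma_mr_put() is the variant for an MR that was never DMA-mapped.
 */
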
/**
 * rpcrdma_buffer_get - Get a request buffer
 * @buffers: Buffer pool from which to obtain a buffer
 *
 * Returns a fresh rpcrdma_req, or NULL if none are available.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;

	spin_lock(&buffers->rb_lock);
	req = list_first_entry_or_null(&buffers->rb_send_bufs,
				       struct rpcrdma_req, rl_list);
	if (req)
		list_del_init(&req->rl_list);
	spin_unlock(&buffers->rb_lock);
	return req;
}

/**
 * rpcrdma_buffer_put - Put request/reply buffers back into pool
 * @req: object to return
 *
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	struct rpcrdma_rep *rep = req->rl_reply;

	req->rl_reply = NULL;

	spin_lock(&buffers->rb_lock);
	list_add(&req->rl_list, &buffers->rb_send_bufs);
	if (rep) {
		if (!rep->rr_temp) {
			list_add(&rep->rr_list, &buffers->rb_recv_bufs);
			rep = NULL;
		}
	}
	spin_unlock(&buffers->rb_lock);
	if (rep)
		rpcrdma_destroy_rep(rep);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;

	if (!rep->rr_temp) {
		spin_lock(&buffers->rb_lock);
		list_add(&rep->rr_list, &buffers->rb_recv_bufs);
		spin_unlock(&buffers->rb_lock);
	} else {
		rpcrdma_destroy_rep(rep);
	}
}

/**
 * rpcrdma_alloc_regbuf - allocate and DMA-map memory for SEND/RECV buffers
 * @size: size of buffer to be allocated, in bytes
 * @direction: direction of data movement
 * @flags: GFP flags
 *
 * Returns an ERR_PTR, or a pointer to a regbuf, a buffer that
 * can be persistently DMA-mapped for I/O.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via ro_map.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction,
		     gfp_t flags)
{
	struct rpcrdma_regbuf *rb;

	rb = kmalloc(sizeof(*rb) + size, flags);
	if (rb == NULL)
		return ERR_PTR(-ENOMEM);

	rb->rg_device = NULL;
	rb->rg_direction = direction;
	rb->rg_iov.length = size;

	return rb;
}

/**
 * __rpcrdma_dma_map_regbuf - DMA-map a regbuf
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be mapped
 */
bool
__rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	struct ib_device *device = ia->ri_device;

	if (rb->rg_direction == DMA_NONE)
		return false;

	rb->rg_iov.addr = ib_dma_map_single(device,
					    (void *)rb->rg_base,
					    rdmab_length(rb),
					    rb->rg_direction);
	if (ib_dma_mapping_error(device, rdmab_addr(rb)))
		return false;

	rb->rg_device = device;
	rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey;
	return true;
}

static void
rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
{
	if (!rb)
		return;

	if (!rpcrdma_regbuf_is_mapped(rb))
		return;

	ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb),
			    rdmab_length(rb), rb->rg_direction);
	rb->rg_device = NULL;
}

/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb)
{
	rpcrdma_dma_unmap_regbuf(rb);
	kfree(rb);
}

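/* Typical regbuf lifecycle, for illustration (this mirrors what
 * rpcrdma_create_req() and rpcrdma_post_recvs() in this file do):
 *
 *	rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
 *	if (IS_ERR(rb))
 *		return PTR_ERR(rb);
 *	...
 *	if (!rpcrdma_regbuf_is_mapped(rb) &&
 *	    !__rpcrdma_dma_map_regbuf(ia, rb))
 *		... handle DMA mapping failure ...
 *	...
 *	rpcrdma_free_regbuf(rb);	// unmaps, then frees
 *
 * Note that allocation does not map: the DMA mapping is deferred until
 * the buffer is first used.
 */
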
/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
	int rc;

	if (!ep->rep_send_count ||
	    test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
		send_wr->send_flags |= IB_SEND_SIGNALED;
		ep->rep_send_count = ep->rep_send_batch;
	} else {
		send_wr->send_flags &= ~IB_SEND_SIGNALED;
		--ep->rep_send_count;
	}

	rc = ia->ri_ops->ro_send(ia, req);
	trace_xprtrdma_post_send(req, rc);
	if (rc)
		return -ENOTCONN;
	return 0;
}

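/* Signaling cadence, for illustration: with rep_send_batch == 32 (an
 * illustrative figure), a run of 32 Sends is posted unsignaled; when
 * rep_send_count reaches zero the next Send carries IB_SEND_SIGNALED,
 * and its completion lets rpcrdma_sendctx_put_locked() retire the whole
 * run of sendctxs at once. A request flagged RPCRDMA_REQ_F_TX_RESOURCES
 * forces signaling immediately.
 */
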
/**
 * rpcrdma_post_recvs - Maybe post some Receive buffers
 * @r_xprt: controlling transport
 * @temp: when true, allocate temp rpcrdma_rep objects
 *
 */
void
rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct ib_recv_wr *wr, *bad_wr;
	int needed, count, rc;

	needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
	if (buf->rb_posted_receives > needed)
		return;
	needed -= buf->rb_posted_receives;

	count = 0;
	wr = NULL;
	while (needed) {
		struct rpcrdma_regbuf *rb;
		struct rpcrdma_rep *rep;

		spin_lock(&buf->rb_lock);
		rep = list_first_entry_or_null(&buf->rb_recv_bufs,
					       struct rpcrdma_rep, rr_list);
		if (likely(rep))
			list_del(&rep->rr_list);
		spin_unlock(&buf->rb_lock);
		if (!rep) {
			if (rpcrdma_create_rep(r_xprt, temp))
				break;
			continue;
		}

		rb = rep->rr_rdmabuf;
		if (!rpcrdma_regbuf_is_mapped(rb)) {
			if (!__rpcrdma_dma_map_regbuf(&r_xprt->rx_ia, rb)) {
				rpcrdma_recv_buffer_put(rep);
				break;
			}
		}

		trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe);
		rep->rr_recv_wr.next = wr;
		wr = &rep->rr_recv_wr;
		++count;
		--needed;
	}
	if (!count)
		return;

	rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr, &bad_wr);
	if (rc) {
		for (wr = bad_wr; wr; wr = wr->next) {
			struct rpcrdma_rep *rep;

			rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
			rpcrdma_recv_buffer_put(rep);
			--count;
		}
	}
	buf->rb_posted_receives += count;
	trace_xprtrdma_post_recvs(r_xprt, count, rc);
}
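
/* Worked example of the accounting above (figures are illustrative):
 * with rb_credits == 64 and rb_bc_srv_max_requests == 0, "needed" is
 * 64; if 60 Receives are already posted, only 4 more are built and
 * chained onto a single ib_post_recv() call. Receives that fail to
 * post are returned to the pool and deducted from the count before
 * rb_posted_receives is updated.
 */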